summaryrefslogtreecommitdiffstats
path: root/runtime
diff options
context:
space:
mode:
authorMathieu Chartier <mathieuc@google.com>2015-04-22 13:56:20 -0700
committerMathieu Chartier <mathieuc@google.com>2015-06-02 09:21:27 -0700
commit3d21bdf8894e780d349c481e5c9e29fe1556051c (patch)
tree61a5231f36c0dabd73457fec81df103462a05aff /runtime
parent71f0a8a123fa27bdc857a98afebbaf0ed09dac15 (diff)
downloadart-3d21bdf8894e780d349c481e5c9e29fe1556051c.zip
art-3d21bdf8894e780d349c481e5c9e29fe1556051c.tar.gz
art-3d21bdf8894e780d349c481e5c9e29fe1556051c.tar.bz2
Move mirror::ArtMethod to native
Optimizing + quick tests are passing, devices boot. TODO: Test and fix bugs in mips64. Saves 16 bytes per most ArtMethod, 7.5MB reduction in system PSS. Some of the savings are from removal of virtual methods and direct methods object arrays. Bug: 19264997 (cherry picked from commit e401d146407d61eeb99f8d6176b2ac13c4df1e33) Change-Id: I622469a0cfa0e7082a2119f3d6a9491eb61e3f3d Fix some ArtMethod related bugs Added root visiting for runtime methods, not currently required since the GcRoots in these methods are null. Added missing GetInterfaceMethodIfProxy in GetMethodLine, fixes --trace run-tests 005, 044. Fixed optimizing compiler bug where we used a normal stack location instead of double on ARM64, this fixes the debuggable tests. TODO: Fix JDWP tests. Bug: 19264997 Change-Id: I7c55f69c61d1b45351fd0dc7185ffe5efad82bd3 ART: Fix casts for 64-bit pointers on 32-bit compiler. Bug: 19264997 Change-Id: Ief45cdd4bae5a43fc8bfdfa7cf744e2c57529457 Fix JDWP tests after ArtMethod change Fixes Throwable::GetStackDepth for exception event detection after internal stack trace representation change. Adds missing ArtMethod::GetInterfaceMethodIfProxy call in case of proxy method. Bug: 19264997 Change-Id: I363e293796848c3ec491c963813f62d868da44d2 Fix accidental IMT and root marking regression Was always using the conflict trampoline. Also included fix for regression in GC time caused by extra roots. Most of the regression was IMT. Fixed bug in DumpGcPerformanceInfo where we would get SIGABRT due to detached thread. EvaluateAndApplyChanges: From ~2500 -> ~1980 GC time: 8.2s -> 7.2s due to 1s less of MarkConcurrentRoots Bug: 19264997 Change-Id: I4333e80a8268c2ed1284f87f25b9f113d4f2c7e0 Fix bogus image test assert Previously we were comparing the size of the non moving space to size of the image file. Now we properly compare the size of the image space against the size of the image file. 
Bug: 19264997 Change-Id: I7359f1f73ae3df60c5147245935a24431c04808a [MIPS64] Fix art_quick_invoke_stub argument offsets. ArtMethod reference's size got bigger, so we need to move other args and leave enough space for ArtMethod* and 'this' pointer. This fixes mips64 boot. Bug: 19264997 Change-Id: I47198d5f39a4caab30b3b77479d5eedaad5006ab
Diffstat (limited to 'runtime')
-rw-r--r--runtime/Android.mk3
-rw-r--r--runtime/arch/arch_test.cc4
-rw-r--r--runtime/arch/arm/context_arm.cc4
-rw-r--r--runtime/arch/arm/fault_handler_arm.cc10
-rw-r--r--runtime/arch/arm/quick_entrypoints_arm.S4
-rw-r--r--runtime/arch/arm/quick_entrypoints_cc_arm.cc16
-rw-r--r--runtime/arch/arm64/context_arm64.cc4
-rw-r--r--runtime/arch/arm64/fault_handler_arm64.cc11
-rw-r--r--runtime/arch/arm64/quick_entrypoints_arm64.S46
-rw-r--r--runtime/arch/mips/context_mips.cc4
-rw-r--r--runtime/arch/mips/fault_handler_mips.cc2
-rw-r--r--runtime/arch/mips/quick_entrypoints_mips.S4
-rw-r--r--runtime/arch/mips64/context_mips64.cc4
-rw-r--r--runtime/arch/mips64/fault_handler_mips64.cc2
-rw-r--r--runtime/arch/mips64/quick_entrypoints_mips64.S92
-rw-r--r--runtime/arch/stub_test.cc128
-rw-r--r--runtime/arch/x86/context_x86.cc4
-rw-r--r--runtime/arch/x86/fault_handler_x86.cc11
-rw-r--r--runtime/arch/x86/quick_entrypoints_x86.S6
-rw-r--r--runtime/arch/x86_64/context_x86_64.cc4
-rw-r--r--runtime/arch/x86_64/quick_entrypoints_x86_64.S49
-rw-r--r--runtime/art_field-inl.h5
-rw-r--r--runtime/art_field.cc5
-rw-r--r--runtime/art_field.h6
-rw-r--r--runtime/art_method-inl.h (renamed from runtime/mirror/art_method-inl.h)246
-rw-r--r--runtime/art_method.cc (renamed from runtime/mirror/art_method.cc)108
-rw-r--r--runtime/art_method.h (renamed from runtime/mirror/art_method.h)249
-rw-r--r--runtime/asm_support.h40
-rw-r--r--runtime/base/arena_allocator.cc12
-rw-r--r--runtime/base/arena_allocator.h38
-rw-r--r--runtime/base/iteration_range.h11
-rw-r--r--runtime/base/macros.h1
-rw-r--r--runtime/base/scoped_arena_containers.h6
-rw-r--r--runtime/check_jni.cc27
-rw-r--r--runtime/check_reference_map_visitor.h8
-rw-r--r--runtime/class_linker-inl.h45
-rw-r--r--runtime/class_linker.cc1281
-rw-r--r--runtime/class_linker.h110
-rw-r--r--runtime/class_linker_test.cc134
-rw-r--r--runtime/common_runtime_test.cc5
-rw-r--r--runtime/common_throws.cc36
-rw-r--r--runtime/common_throws.h20
-rw-r--r--runtime/debugger.cc211
-rw-r--r--runtime/debugger.h50
-rw-r--r--runtime/dex_file.cc4
-rw-r--r--runtime/dex_file.h4
-rw-r--r--runtime/entrypoints/entrypoint_utils-inl.h97
-rw-r--r--runtime/entrypoints/entrypoint_utils.cc29
-rw-r--r--runtime/entrypoints/entrypoint_utils.h45
-rw-r--r--runtime/entrypoints/interpreter/interpreter_entrypoints.cc6
-rw-r--r--runtime/entrypoints/jni/jni_entrypoints.cc4
-rw-r--r--runtime/entrypoints/quick/callee_save_frame.h2
-rw-r--r--runtime/entrypoints/quick/quick_alloc_entrypoints.cc56
-rw-r--r--runtime/entrypoints/quick/quick_default_externs.h8
-rw-r--r--runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc1
-rw-r--r--runtime/entrypoints/quick/quick_dexcache_entrypoints.cc2
-rw-r--r--runtime/entrypoints/quick/quick_entrypoints.h2
-rw-r--r--runtime/entrypoints/quick/quick_entrypoints_list.h26
-rw-r--r--runtime/entrypoints/quick/quick_field_entrypoints.cc50
-rw-r--r--runtime/entrypoints/quick/quick_fillarray_entrypoints.cc4
-rw-r--r--runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc7
-rw-r--r--runtime/entrypoints/quick/quick_jni_entrypoints.cc6
-rw-r--r--runtime/entrypoints/quick/quick_trampoline_entrypoints.cc182
-rw-r--r--runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc11
-rw-r--r--runtime/entrypoints/runtime_asm_entrypoints.h8
-rw-r--r--runtime/exception_test.cc15
-rw-r--r--runtime/fault_handler.cc20
-rw-r--r--runtime/fault_handler.h5
-rw-r--r--runtime/gc/accounting/mod_union_table_test.cc21
-rw-r--r--runtime/gc/accounting/space_bitmap.cc2
-rw-r--r--runtime/gc/collector/concurrent_copying.cc1
-rw-r--r--runtime/gc/collector/mark_sweep.cc6
-rw-r--r--runtime/gc/heap-inl.h1
-rw-r--r--runtime/gc/heap.cc7
-rw-r--r--runtime/gc/reference_processor.cc2
-rw-r--r--runtime/gc/space/image_space.cc62
-rw-r--r--runtime/gc/space/space_test.h2
-rw-r--r--runtime/globals.h4
-rw-r--r--runtime/handle_scope.h4
-rw-r--r--runtime/image.cc37
-rw-r--r--runtime/image.h103
-rw-r--r--runtime/indirect_reference_table-inl.h1
-rw-r--r--runtime/instrumentation.cc112
-rw-r--r--runtime/instrumentation.h85
-rw-r--r--runtime/instrumentation_test.cc59
-rw-r--r--runtime/intern_table.cc1
-rw-r--r--runtime/interpreter/interpreter.h4
-rw-r--r--runtime/interpreter/interpreter_common.cc30
-rw-r--r--runtime/interpreter/interpreter_common.h12
-rw-r--r--runtime/interpreter/interpreter_goto_table_impl.cc12
-rw-r--r--runtime/interpreter/interpreter_switch_impl.cc12
-rw-r--r--runtime/interpreter/unstarted_runtime.cc318
-rw-r--r--runtime/interpreter/unstarted_runtime.h8
-rw-r--r--runtime/interpreter/unstarted_runtime_list.h1
-rw-r--r--runtime/interpreter/unstarted_runtime_test.cc7
-rw-r--r--runtime/java_vm_ext.cc10
-rw-r--r--runtime/java_vm_ext.h6
-rw-r--r--runtime/jdwp/jdwp.h4
-rw-r--r--runtime/jdwp/jdwp_event.cc1
-rw-r--r--runtime/jit/jit.cc6
-rw-r--r--runtime/jit/jit.h9
-rw-r--r--runtime/jit/jit_code_cache.cc8
-rw-r--r--runtime/jit/jit_code_cache.h14
-rw-r--r--runtime/jit/jit_code_cache_test.cc21
-rw-r--r--runtime/jit/jit_instrumentation.cc16
-rw-r--r--runtime/jit/jit_instrumentation.h20
-rw-r--r--runtime/jni_internal.cc60
-rw-r--r--runtime/jni_internal_test.cc2
-rw-r--r--runtime/linear_alloc.cc15
-rw-r--r--runtime/linear_alloc.h12
-rw-r--r--runtime/mirror/abstract_method.cc16
-rw-r--r--runtime/mirror/abstract_method.h12
-rw-r--r--runtime/mirror/array-inl.h32
-rw-r--r--runtime/mirror/array.cc20
-rw-r--r--runtime/mirror/array.h16
-rw-r--r--runtime/mirror/class-inl.h315
-rw-r--r--runtime/mirror/class.cc250
-rw-r--r--runtime/mirror/class.h282
-rw-r--r--runtime/mirror/dex_cache-inl.h52
-rw-r--r--runtime/mirror/dex_cache.cc28
-rw-r--r--runtime/mirror/dex_cache.h44
-rw-r--r--runtime/mirror/field-inl.h12
-rw-r--r--runtime/mirror/field.cc1
-rw-r--r--runtime/mirror/iftable.h23
-rw-r--r--runtime/mirror/method.cc8
-rw-r--r--runtime/mirror/method.h4
-rw-r--r--runtime/mirror/object-inl.h66
-rw-r--r--runtime/mirror/object.cc3
-rw-r--r--runtime/mirror/object.h26
-rw-r--r--runtime/mirror/object_array.h4
-rw-r--r--runtime/mirror/object_test.cc24
-rw-r--r--runtime/mirror/reference-inl.h4
-rw-r--r--runtime/mirror/reference.cc2
-rw-r--r--runtime/mirror/reference.h2
-rw-r--r--runtime/mirror/string-inl.h4
-rw-r--r--runtime/mirror/string.h2
-rw-r--r--runtime/mirror/throwable.cc34
-rw-r--r--runtime/monitor.cc13
-rw-r--r--runtime/monitor.h8
-rw-r--r--runtime/monitor_android.cc2
-rw-r--r--runtime/native/dalvik_system_VMRuntime.cc14
-rw-r--r--runtime/native/dalvik_system_VMStack.cc13
-rw-r--r--runtime/native/java_lang_Class.cc175
-rw-r--r--runtime/native/java_lang_DexCache.cc1
-rw-r--r--runtime/native/java_lang_reflect_Constructor.cc3
-rw-r--r--runtime/native/java_lang_reflect_Field.cc1
-rw-r--r--runtime/native/java_lang_reflect_Method.cc12
-rw-r--r--runtime/native/scoped_fast_native_object_access.h4
-rw-r--r--runtime/native/sun_misc_Unsafe.cc2
-rw-r--r--runtime/native_bridge_art_interface.cc47
-rw-r--r--runtime/nth_caller_visitor.h6
-rw-r--r--runtime/oat_file-inl.h16
-rw-r--r--runtime/oat_file.cc5
-rw-r--r--runtime/oat_file.h2
-rw-r--r--runtime/object_lock.cc1
-rw-r--r--runtime/primitive.h1
-rw-r--r--runtime/profiler.cc26
-rw-r--r--runtime/profiler.h14
-rw-r--r--runtime/proxy_test.cc16
-rw-r--r--runtime/quick/inline_method_analyser.cc7
-rw-r--r--runtime/quick_exception_handler.cc50
-rw-r--r--runtime/quick_exception_handler.h12
-rw-r--r--runtime/read_barrier.h3
-rw-r--r--runtime/reference_table_test.cc2
-rw-r--r--runtime/reflection.cc65
-rw-r--r--runtime/reflection.h2
-rw-r--r--runtime/reflection_test.cc34
-rw-r--r--runtime/runtime-inl.h38
-rw-r--r--runtime/runtime.cc126
-rw-r--r--runtime/runtime.h73
-rw-r--r--runtime/scoped_thread_state_change.h11
-rw-r--r--runtime/stack.cc136
-rw-r--r--runtime/stack.h103
-rw-r--r--runtime/stride_iterator.h70
-rw-r--r--runtime/thread.cc141
-rw-r--r--runtime/thread.h12
-rw-r--r--runtime/trace.cc109
-rw-r--r--runtime/trace.h32
-rw-r--r--runtime/transaction.cc1
-rw-r--r--runtime/transaction_test.cc2
-rw-r--r--runtime/utils.cc13
-rw-r--r--runtime/utils.h13
-rw-r--r--runtime/utils_test.cc8
-rw-r--r--runtime/verifier/method_verifier.cc181
-rw-r--r--runtime/verifier/method_verifier.h35
-rw-r--r--runtime/verify_object-inl.h1
-rw-r--r--runtime/well_known_classes.cc21
-rw-r--r--runtime/well_known_classes.h1
188 files changed, 4024 insertions, 3724 deletions
diff --git a/runtime/Android.mk b/runtime/Android.mk
index a4fa24d..b38f9bc 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -20,6 +20,7 @@ include art/build/Android.common_build.mk
LIBART_COMMON_SRC_FILES := \
art_field.cc \
+ art_method.cc \
atomic.cc.arm \
barrier.cc \
base/allocator.cc \
@@ -102,7 +103,6 @@ LIBART_COMMON_SRC_FILES := \
mem_map.cc \
memory_region.cc \
mirror/abstract_method.cc \
- mirror/art_method.cc \
mirror/array.cc \
mirror/class.cc \
mirror/dex_cache.cc \
@@ -317,6 +317,7 @@ LIBART_ENUM_OPERATOR_OUT_HEADER_FILES := \
gc/space/region_space.h \
gc/space/space.h \
gc/heap.h \
+ image.h \
instrumentation.h \
indirect_reference_table.h \
invoke_type.h \
diff --git a/runtime/arch/arch_test.cc b/runtime/arch/arch_test.cc
index 5733ab6..40e2cd3 100644
--- a/runtime/arch/arch_test.cc
+++ b/runtime/arch/arch_test.cc
@@ -16,8 +16,8 @@
#include <stdint.h>
+#include "art_method-inl.h"
#include "common_runtime_test.h"
-#include "mirror/art_method-inl.h"
#include "quick/quick_method_frame_info.h"
namespace art {
@@ -38,7 +38,7 @@ class ArchTest : public CommonRuntimeTest {
t->TransitionFromSuspendedToRunnable(); // So we can create callee-save methods.
r->SetInstructionSet(isa);
- mirror::ArtMethod* save_method = r->CreateCalleeSaveMethod();
+ ArtMethod* save_method = r->CreateCalleeSaveMethod();
r->SetCalleeSaveMethod(save_method, type);
QuickMethodFrameInfo frame_info = save_method->GetQuickFrameInfo();
EXPECT_EQ(frame_info.FrameSizeInBytes(), save_size) << "Expected and real size differs for "
diff --git a/runtime/arch/arm/context_arm.cc b/runtime/arch/arm/context_arm.cc
index c0e658c..403d348 100644
--- a/runtime/arch/arm/context_arm.cc
+++ b/runtime/arch/arm/context_arm.cc
@@ -16,8 +16,8 @@
#include "context_arm.h"
+#include "art_method-inl.h"
#include "base/bit_utils.h"
-#include "mirror/art_method-inl.h"
#include "quick/quick_method_frame_info.h"
namespace art {
@@ -36,7 +36,7 @@ void ArmContext::Reset() {
}
void ArmContext::FillCalleeSaves(const StackVisitor& fr) {
- mirror::ArtMethod* method = fr.GetMethod();
+ ArtMethod* method = fr.GetMethod();
const QuickMethodFrameInfo frame_info = method->GetQuickFrameInfo();
int spill_pos = 0;
diff --git a/runtime/arch/arm/fault_handler_arm.cc b/runtime/arch/arm/fault_handler_arm.cc
index d84cb53..90b0d53 100644
--- a/runtime/arch/arm/fault_handler_arm.cc
+++ b/runtime/arch/arm/fault_handler_arm.cc
@@ -18,13 +18,13 @@
#include "fault_handler.h"
#include <sys/ucontext.h>
+
+#include "art_method-inl.h"
#include "base/macros.h"
#include "base/hex_dump.h"
#include "globals.h"
#include "base/logging.h"
#include "base/hex_dump.h"
-#include "mirror/art_method.h"
-#include "mirror/art_method-inl.h"
#include "thread.h"
#include "thread-inl.h"
@@ -65,7 +65,7 @@ void FaultManager::HandleNestedSignal(int sig ATTRIBUTE_UNUSED, siginfo_t* info
}
void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo ATTRIBUTE_UNUSED, void* context,
- mirror::ArtMethod** out_method,
+ ArtMethod** out_method,
uintptr_t* out_return_pc, uintptr_t* out_sp) {
struct ucontext* uc = reinterpret_cast<struct ucontext*>(context);
struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
@@ -81,10 +81,10 @@ void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo ATTRIBUTE_UNUSED
uintptr_t* overflow_addr = reinterpret_cast<uintptr_t*>(
reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(kArm));
if (overflow_addr == fault_addr) {
- *out_method = reinterpret_cast<mirror::ArtMethod*>(sc->arm_r0);
+ *out_method = reinterpret_cast<ArtMethod*>(sc->arm_r0);
} else {
// The method is at the top of the stack.
- *out_method = reinterpret_cast<mirror::ArtMethod*>(reinterpret_cast<uintptr_t*>(*out_sp)[0]);
+ *out_method = reinterpret_cast<ArtMethod*>(reinterpret_cast<uintptr_t*>(*out_sp)[0]);
}
// Work out the return PC. This will be the address of the instruction
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 7488578..5ae291a 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -423,7 +423,7 @@ ENTRY art_quick_invoke_stub_internal
mov r4, #SUSPEND_CHECK_INTERVAL @ reset r4 to suspend check interval
#endif
- ldr ip, [r0, #MIRROR_ART_METHOD_QUICK_CODE_OFFSET_32] @ get pointer to the code
+ ldr ip, [r0, #ART_METHOD_QUICK_CODE_OFFSET_32] @ get pointer to the code
blx ip @ call the method
mov sp, r11 @ restore the stack pointer
@@ -895,7 +895,7 @@ END art_quick_proxy_invoke_handler
*/
ENTRY art_quick_imt_conflict_trampoline
ldr r0, [sp, #0] @ load caller Method*
- ldr r0, [r0, #MIRROR_ART_METHOD_DEX_CACHE_METHODS_OFFSET] @ load dex_cache_resolved_methods
+ ldr r0, [r0, #ART_METHOD_DEX_CACHE_METHODS_OFFSET] @ load dex_cache_resolved_methods
add r0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET @ get starting address of data
ldr r0, [r0, r12, lsl 2] @ load the target method
b art_quick_invoke_interface_trampoline
diff --git a/runtime/arch/arm/quick_entrypoints_cc_arm.cc b/runtime/arch/arm/quick_entrypoints_cc_arm.cc
index a3acd7e..ce531f0 100644
--- a/runtime/arch/arm/quick_entrypoints_cc_arm.cc
+++ b/runtime/arch/arm/quick_entrypoints_cc_arm.cc
@@ -14,23 +14,23 @@
* limitations under the License.
*/
-#include "mirror/art_method.h"
+#include "art_method.h"
#include "utils.h" // For RoundUp().
namespace art {
// Assembly stub that does the final part of the up-call into Java.
-extern "C" void art_quick_invoke_stub_internal(mirror::ArtMethod*, uint32_t*, uint32_t,
+extern "C" void art_quick_invoke_stub_internal(ArtMethod*, uint32_t*, uint32_t,
Thread* self, JValue* result, uint32_t, uint32_t*,
uint32_t*);
template <bool kIsStatic>
-static void quick_invoke_reg_setup(mirror::ArtMethod* method, uint32_t* args, uint32_t args_size,
+static void quick_invoke_reg_setup(ArtMethod* method, uint32_t* args, uint32_t args_size,
Thread* self, JValue* result, const char* shorty) {
// Note: We do not follow aapcs ABI in quick code for both softfp and hardfp.
uint32_t core_reg_args[4]; // r0 ~ r3
uint32_t fp_reg_args[16]; // s0 ~ s15 (d0 ~ d7)
- uint32_t gpr_index = 1; // Index into core registers. Reserve r0 for mirror::ArtMethod*.
+ uint32_t gpr_index = 1; // Index into core registers. Reserve r0 for ArtMethod*.
uint32_t fpr_index = 0; // Index into float registers.
uint32_t fpr_double_index = 0; // Index into float registers for doubles.
uint32_t arg_index = 0; // Index into argument array.
@@ -99,16 +99,16 @@ static void quick_invoke_reg_setup(mirror::ArtMethod* method, uint32_t* args, ui
core_reg_args, fp_reg_args);
}
-// Called by art::mirror::ArtMethod::Invoke to do entry into a non-static method.
+// Called by art::ArtMethod::Invoke to do entry into a non-static method.
// TODO: migrate into an assembly implementation as with ARM64.
-extern "C" void art_quick_invoke_stub(mirror::ArtMethod* method, uint32_t* args, uint32_t args_size,
+extern "C" void art_quick_invoke_stub(ArtMethod* method, uint32_t* args, uint32_t args_size,
Thread* self, JValue* result, const char* shorty) {
quick_invoke_reg_setup<false>(method, args, args_size, self, result, shorty);
}
-// Called by art::mirror::ArtMethod::Invoke to do entry into a static method.
+// Called by art::ArtMethod::Invoke to do entry into a static method.
// TODO: migrate into an assembly implementation as with ARM64.
-extern "C" void art_quick_invoke_static_stub(mirror::ArtMethod* method, uint32_t* args,
+extern "C" void art_quick_invoke_static_stub(ArtMethod* method, uint32_t* args,
uint32_t args_size, Thread* self, JValue* result,
const char* shorty) {
quick_invoke_reg_setup<true>(method, args, args_size, self, result, shorty);
diff --git a/runtime/arch/arm64/context_arm64.cc b/runtime/arch/arm64/context_arm64.cc
index cb5b9e1..91c0fc9 100644
--- a/runtime/arch/arm64/context_arm64.cc
+++ b/runtime/arch/arm64/context_arm64.cc
@@ -18,8 +18,8 @@
#include "context_arm64.h"
+#include "art_method-inl.h"
#include "base/bit_utils.h"
-#include "mirror/art_method-inl.h"
#include "quick/quick_method_frame_info.h"
namespace art {
@@ -38,7 +38,7 @@ void Arm64Context::Reset() {
}
void Arm64Context::FillCalleeSaves(const StackVisitor& fr) {
- mirror::ArtMethod* method = fr.GetMethod();
+ ArtMethod* method = fr.GetMethod();
const QuickMethodFrameInfo frame_info = method->GetQuickFrameInfo();
int spill_pos = 0;
diff --git a/runtime/arch/arm64/fault_handler_arm64.cc b/runtime/arch/arm64/fault_handler_arm64.cc
index 0448c76..3e9ad0d 100644
--- a/runtime/arch/arm64/fault_handler_arm64.cc
+++ b/runtime/arch/arm64/fault_handler_arm64.cc
@@ -16,14 +16,15 @@
#include "fault_handler.h"
+
#include <sys/ucontext.h>
+
+#include "art_method-inl.h"
#include "base/macros.h"
#include "globals.h"
#include "base/logging.h"
#include "base/hex_dump.h"
#include "registers_arm64.h"
-#include "mirror/art_method.h"
-#include "mirror/art_method-inl.h"
#include "thread.h"
#include "thread-inl.h"
@@ -53,7 +54,7 @@ void FaultManager::HandleNestedSignal(int sig ATTRIBUTE_UNUSED, siginfo_t* info
}
void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo ATTRIBUTE_UNUSED, void* context,
- mirror::ArtMethod** out_method,
+ ArtMethod** out_method,
uintptr_t* out_return_pc, uintptr_t* out_sp) {
struct ucontext *uc = reinterpret_cast<struct ucontext *>(context);
struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
@@ -69,10 +70,10 @@ void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo ATTRIBUTE_UNUSED
uintptr_t* overflow_addr = reinterpret_cast<uintptr_t*>(
reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(kArm64));
if (overflow_addr == fault_addr) {
- *out_method = reinterpret_cast<mirror::ArtMethod*>(sc->regs[0]);
+ *out_method = reinterpret_cast<ArtMethod*>(sc->regs[0]);
} else {
// The method is at the top of the stack.
- *out_method = (reinterpret_cast<StackReference<mirror::ArtMethod>* >(*out_sp)[0]).AsMirrorPtr();
+ *out_method = *reinterpret_cast<ArtMethod**>(*out_sp);
}
// Work out the return PC. This will be the address of the instruction
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index f8b0734..f90a6b0 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -32,7 +32,8 @@
// xIP0 = (ArtMethod*) Runtime.instance_.callee_save_methods[kRefAndArgs] .
THIS_LOAD_REQUIRES_READ_BARRIER
- ldr wIP0, [xIP0, RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET ]
+ // Loads appropriate callee-save-method.
+ ldr xIP0, [xIP0, RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET ]
sub sp, sp, #176
.cfi_adjust_cfa_offset 176
@@ -97,7 +98,8 @@
// xIP0 = (ArtMethod*) Runtime.instance_.callee_save_methods[kRefAndArgs] .
THIS_LOAD_REQUIRES_READ_BARRIER
- ldr wIP0, [xIP0, RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET ]
+ // Loads appropriate callee-save-method.
+ ldr xIP0, [xIP0, RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET ]
sub sp, sp, #112
.cfi_adjust_cfa_offset 112
@@ -271,7 +273,7 @@
// xIP0 = (ArtMethod*) Runtime.instance_.callee_save_methods[kRefAndArgs] .
THIS_LOAD_REQUIRES_READ_BARRIER
- ldr wIP0, [xIP0, RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET ]
+ ldr xIP0, [xIP0, RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET ]
SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_INTERNAL
@@ -483,7 +485,7 @@ ENTRY \c_name
// Helper signature is always
// (method_idx, *this_object, *caller_method, *self, sp)
- ldr w2, [sp, #FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE] // pass caller Method*
+ ldr x2, [sp, #FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE] // pass caller Method*
mov x3, xSELF // pass Thread::Current
mov x4, sp
bl \cxx_name // (method_idx, this, caller, Thread*, SP)
@@ -508,14 +510,14 @@ INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvo
.macro INVOKE_STUB_CREATE_FRAME
SAVE_SIZE=15*8 // x4, x5, x19, x20, x21, x22, x23, x24, x25, x26, x27, x28, SP, LR, FP saved.
-SAVE_SIZE_AND_METHOD=SAVE_SIZE+STACK_REFERENCE_SIZE
+SAVE_SIZE_AND_METHOD=SAVE_SIZE+8
mov x9, sp // Save stack pointer.
.cfi_register sp,x9
add x10, x2, # SAVE_SIZE_AND_METHOD // calculate size of frame.
- sub x10, sp, x10 // Calculate SP position - saves + ArtMethod* + args
+ sub x10, sp, x10 // Calculate SP position - saves + ArtMethod* + args
and x10, x10, # ~0xf // Enforce 16 byte stack alignment.
mov sp, x10 // Set new SP.
@@ -565,7 +567,7 @@ SAVE_SIZE_AND_METHOD=SAVE_SIZE+STACK_REFERENCE_SIZE
// W2 - args length
// X9 - destination address.
// W10 - temporary
- add x9, sp, #4 // Destination address is bottom of stack + null.
+ add x9, sp, #8 // Destination address is bottom of stack + null.
// Use \@ to differentiate between macro invocations.
.LcopyParams\@:
@@ -579,18 +581,14 @@ SAVE_SIZE_AND_METHOD=SAVE_SIZE+STACK_REFERENCE_SIZE
.LendCopyParams\@:
- // Store null into StackReference<Method>* at bottom of frame.
- str wzr, [sp]
-
-#if (STACK_REFERENCE_SIZE != 4)
-#error "STACK_REFERENCE_SIZE(ARM64) size not as expected."
-#endif
+ // Store null into ArtMethod* at bottom of frame.
+ str xzr, [sp]
.endm
.macro INVOKE_STUB_CALL_AND_RETURN
// load method-> METHOD_QUICK_CODE_OFFSET
- ldr x9, [x0 , #MIRROR_ART_METHOD_QUICK_CODE_OFFSET_64]
+ ldr x9, [x0, #ART_METHOD_QUICK_CODE_OFFSET_64]
// Branch to method.
blr x9
@@ -681,7 +679,7 @@ SAVE_SIZE_AND_METHOD=SAVE_SIZE+STACK_REFERENCE_SIZE
* | uint32_t out[n-1] |
* | : : | Outs
* | uint32_t out[0] |
- * | StackRef<ArtMethod> | <- SP value=null
+ * | ArtMethod* | <- SP value=null
* +----------------------+
*
* Outgoing registers:
@@ -1314,7 +1312,7 @@ END \name
.extern \entrypoint
ENTRY \name
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
- ldr w1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
+ ldr x1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
mov x2, xSELF // pass Thread::Current
bl \entrypoint // (uint32_t type_idx, Method* method, Thread*, SP)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
@@ -1326,7 +1324,7 @@ END \name
.extern \entrypoint
ENTRY \name
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
- ldr w2, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
+ ldr x2, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
mov x3, xSELF // pass Thread::Current
bl \entrypoint
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
@@ -1338,7 +1336,7 @@ END \name
.extern \entrypoint
ENTRY \name
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
- ldr w3, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
+ ldr x3, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
mov x4, xSELF // pass Thread::Current
bl \entrypoint
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
@@ -1401,7 +1399,7 @@ THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCode, RE
ENTRY art_quick_set64_static
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
mov x3, x1 // Store value
- ldr w1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
+ ldr x1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
mov x2, x3 // Put value param
mov x3, xSELF // pass Thread::Current
bl artSet64StaticFromCode
@@ -1468,10 +1466,10 @@ END art_quick_proxy_invoke_handler
* dex method index.
*/
ENTRY art_quick_imt_conflict_trampoline
- ldr w0, [sp, #0] // load caller Method*
- ldr w0, [x0, #MIRROR_ART_METHOD_DEX_CACHE_METHODS_OFFSET] // load dex_cache_resolved_methods
- add x0, x0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET // get starting address of data
- ldr w0, [x0, xIP1, lsl 2] // load the target method
+ ldr x0, [sp, #0] // load caller Method*
+ ldr w0, [x0, #ART_METHOD_DEX_CACHE_METHODS_OFFSET] // load dex_cache_resolved_methods
+ add x0, x0, #MIRROR_LONG_ARRAY_DATA_OFFSET // get starting address of data
+ ldr x0, [x0, xIP1, lsl 3] // load the target method
b art_quick_invoke_interface_trampoline
END art_quick_imt_conflict_trampoline
@@ -1482,7 +1480,7 @@ ENTRY art_quick_resolution_trampoline
bl artQuickResolutionTrampoline // (called, receiver, Thread*, SP)
cbz x0, 1f
mov xIP0, x0 // Remember returned code pointer in xIP0.
- ldr w0, [sp, #0] // artQuickResolutionTrampoline puts called method in *SP.
+ ldr x0, [sp, #0] // artQuickResolutionTrampoline puts called method in *SP.
RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
br xIP0
1:
diff --git a/runtime/arch/mips/context_mips.cc b/runtime/arch/mips/context_mips.cc
index 24892e9..53f2b65 100644
--- a/runtime/arch/mips/context_mips.cc
+++ b/runtime/arch/mips/context_mips.cc
@@ -16,8 +16,8 @@
#include "context_mips.h"
+#include "art_method-inl.h"
#include "base/bit_utils.h"
-#include "mirror/art_method-inl.h"
#include "quick/quick_method_frame_info.h"
namespace art {
@@ -36,7 +36,7 @@ void MipsContext::Reset() {
}
void MipsContext::FillCalleeSaves(const StackVisitor& fr) {
- mirror::ArtMethod* method = fr.GetMethod();
+ ArtMethod* method = fr.GetMethod();
const QuickMethodFrameInfo frame_info = method->GetQuickFrameInfo();
int spill_pos = 0;
diff --git a/runtime/arch/mips/fault_handler_mips.cc b/runtime/arch/mips/fault_handler_mips.cc
index c9949d4..abe495b 100644
--- a/runtime/arch/mips/fault_handler_mips.cc
+++ b/runtime/arch/mips/fault_handler_mips.cc
@@ -35,7 +35,7 @@ void FaultManager::HandleNestedSignal(int sig ATTRIBUTE_UNUSED, siginfo_t* info
void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo ATTRIBUTE_UNUSED,
void* context ATTRIBUTE_UNUSED,
- mirror::ArtMethod** out_method ATTRIBUTE_UNUSED,
+ ArtMethod** out_method ATTRIBUTE_UNUSED,
uintptr_t* out_return_pc ATTRIBUTE_UNUSED,
uintptr_t* out_sp ATTRIBUTE_UNUSED) {
}
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index f3d2274..f1e6edb 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -526,7 +526,7 @@ ENTRY art_quick_invoke_stub
lw $a1, 4($sp) # copy arg value for a1
lw $a2, 8($sp) # copy arg value for a2
lw $a3, 12($sp) # copy arg value for a3
- lw $t9, MIRROR_ART_METHOD_QUICK_CODE_OFFSET_32($a0) # get pointer to the code
+ lw $t9, ART_METHOD_QUICK_CODE_OFFSET_32($a0) # get pointer to the code
jalr $t9 # call the method
sw $zero, 0($sp) # store null for method* at bottom of frame
move $sp, $fp # restore the stack
@@ -1103,7 +1103,7 @@ END art_quick_proxy_invoke_handler
*/
ENTRY art_quick_imt_conflict_trampoline
lw $a0, 0($sp) # load caller Method*
- lw $a0, MIRROR_ART_METHOD_DEX_CACHE_METHODS_OFFSET($a0) # load dex_cache_resolved_methods
+ lw $a0, ART_METHOD_DEX_CACHE_METHODS_OFFSET($a0) # load dex_cache_resolved_methods
sll $t0, 2 # convert target method offset to bytes
add $a0, $t0 # get address of target method
lw $a0, MIRROR_OBJECT_ARRAY_DATA_OFFSET($a0) # load the target method
diff --git a/runtime/arch/mips64/context_mips64.cc b/runtime/arch/mips64/context_mips64.cc
index 8ce6cf0..6637c37 100644
--- a/runtime/arch/mips64/context_mips64.cc
+++ b/runtime/arch/mips64/context_mips64.cc
@@ -16,8 +16,8 @@
#include "context_mips64.h"
+#include "art_method-inl.h"
#include "base/bit_utils.h"
-#include "mirror/art_method-inl.h"
#include "quick/quick_method_frame_info.h"
namespace art {
@@ -36,7 +36,7 @@ void Mips64Context::Reset() {
}
void Mips64Context::FillCalleeSaves(const StackVisitor& fr) {
- mirror::ArtMethod* method = fr.GetMethod();
+ ArtMethod* method = fr.GetMethod();
const QuickMethodFrameInfo frame_info = method->GetQuickFrameInfo();
int spill_pos = 0;
diff --git a/runtime/arch/mips64/fault_handler_mips64.cc b/runtime/arch/mips64/fault_handler_mips64.cc
index 7b5cd49..277c2b2 100644
--- a/runtime/arch/mips64/fault_handler_mips64.cc
+++ b/runtime/arch/mips64/fault_handler_mips64.cc
@@ -35,7 +35,7 @@ void FaultManager::HandleNestedSignal(int sig ATTRIBUTE_UNUSED, siginfo_t* info
void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo ATTRIBUTE_UNUSED,
void* context ATTRIBUTE_UNUSED,
- mirror::ArtMethod** out_method ATTRIBUTE_UNUSED,
+ ArtMethod** out_method ATTRIBUTE_UNUSED,
uintptr_t* out_return_pc ATTRIBUTE_UNUSED,
uintptr_t* out_sp ATTRIBUTE_UNUSED) {
}
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index ff79b5d..227fe7e 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -90,8 +90,8 @@
ld $v0, %got(_ZN3art7Runtime9instance_E)($gp)
ld $v0, 0($v0)
THIS_LOAD_REQUIRES_READ_BARRIER
- lwu $v0, RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET($v0)
- sw $v0, 0($sp) # Place Method* at bottom of stack.
+ ld $v0, RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET($v0)
+ sd $v0, 0($sp) # Place ArtMethod* at bottom of stack.
sd $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame.
.endm
@@ -133,8 +133,8 @@
ld $v0, %got(_ZN3art7Runtime9instance_E)($gp)
ld $v0, 0($v0)
THIS_LOAD_REQUIRES_READ_BARRIER
- lwu $v0, RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET($v0)
- sw $v0, 0($sp) # Place Method* at bottom of stack.
+ ld $v0, RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET($v0)
+ sd $v0, 0($sp) # Place Method* at bottom of stack.
sd $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame.
.endm
@@ -256,14 +256,14 @@
ld $v0, %got(_ZN3art7Runtime9instance_E)($gp)
ld $v0, 0($v0)
THIS_LOAD_REQUIRES_READ_BARRIER
- lwu $v0, RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET($v0)
- sw $v0, 0($sp) # Place Method* at bottom of stack.
+ ld $v0, RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET($v0)
+ sd $v0, 0($sp) # Place ArtMethod* at bottom of stack.
sd $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame.
.endm
.macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_A0
SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_INTERNAL
- sw $a0, 0($sp) # Place Method* at bottom of stack.
+ sd $a0, 0($sp) # Place ArtMethod* at bottom of stack.
sd $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame.
.endm
@@ -642,7 +642,7 @@ ENTRY_NO_GP art_quick_invoke_stub
move $s1, $a3 # move managed thread pointer into s1 (rSELF)
move $s8, $sp # save sp in s8 (fp)
- daddiu $t3, $a2, 20 # add 4 for method* and 16 for stack alignment
+ daddiu $t3, $a2, 24 # add 8 for ArtMethod* and 16 for stack alignment
dsrl $t3, $t3, 4 # shift the frame size right 4
dsll $t3, $t3, 4 # shift the frame size left 4 to align to 16 bytes
dsubu $sp, $sp, $t3 # reserve stack space for argument array
@@ -650,7 +650,7 @@ ENTRY_NO_GP art_quick_invoke_stub
daddiu $t0, $a5, 1 # t0 = shorty[1] (skip 1 for return type)
daddiu $t1, $a1, 4 # t1 = ptr to arg_array[4] (skip this ptr)
daddiu $t2, $a2, -4 # t2 = number of argument bytes remain (skip this ptr)
- daddiu $v0, $sp, 8 # v0 points to where to copy arg_array
+ daddiu $v0, $sp, 12 # v0 points to where to copy arg_array
LOOP_OVER_SHORTY_LOADING_REG a2, f14, call_fn
LOOP_OVER_SHORTY_LOADING_REG a3, f15, call_fn
LOOP_OVER_SHORTY_LOADING_REG a4, f16, call_fn
@@ -671,9 +671,9 @@ ENTRY_NO_GP art_quick_invoke_stub
call_fn:
# call method (a0 and a1 have been untouched)
lwu $a1, 0($a1) # make a1 = this ptr
- sw $a1, 4($sp) # copy this ptr (skip 4 bytes for method*)
- sw $zero, 0($sp) # store null for method* at bottom of frame
- ld $t9, MIRROR_ART_METHOD_QUICK_CODE_OFFSET_64($a0) # get pointer to the code
+ sw $a1, 8($sp) # copy this ptr (skip 8 bytes for ArtMethod*)
+ sd $zero, 0($sp) # store null for ArtMethod* at bottom of frame
+ ld $t9, ART_METHOD_QUICK_CODE_OFFSET_64($a0) # get pointer to the code
jalr $t9 # call the method
nop
move $sp, $s8 # restore sp
@@ -745,7 +745,7 @@ ENTRY_NO_GP art_quick_invoke_static_stub
move $s1, $a3 # move managed thread pointer into s1 (rSELF)
move $s8, $sp # save sp in s8 (fp)
- daddiu $t3, $a2, 20 # add 4 for method* and 16 for stack alignment
+ daddiu $t3, $a2, 24 # add 8 for ArtMethod* and 16 for stack alignment
dsrl $t3, $t3, 4 # shift the frame size right 4
dsll $t3, $t3, 4 # shift the frame size left 4 to align to 16 bytes
dsubu $sp, $sp, $t3 # reserve stack space for argument array
@@ -753,7 +753,7 @@ ENTRY_NO_GP art_quick_invoke_static_stub
daddiu $t0, $a5, 1 # t0 = shorty[1] (skip 1 for return type)
move $t1, $a1 # t1 = arg_array
move $t2, $a2 # t2 = number of argument bytes remain
- daddiu $v0, $sp, 4 # v0 points to where to copy arg_array
+ daddiu $v0, $sp, 8 # v0 points to where to copy arg_array
LOOP_OVER_SHORTY_LOADING_REG a1, f13, call_sfn
LOOP_OVER_SHORTY_LOADING_REG a2, f14, call_sfn
LOOP_OVER_SHORTY_LOADING_REG a3, f15, call_sfn
@@ -774,8 +774,8 @@ ENTRY_NO_GP art_quick_invoke_static_stub
call_sfn:
# call method (a0 has been untouched)
- sw $zero, 0($sp) # store null for method* at bottom of frame
- ld $t9, MIRROR_ART_METHOD_QUICK_CODE_OFFSET_64($a0) # get pointer to the code
+ sd $zero, 0($sp) # store null for ArtMethod* at bottom of frame
+ ld $t9, ART_METHOD_QUICK_CODE_OFFSET_64($a0) # get pointer to the code
jalr $t9 # call the method
nop
move $sp, $s8 # restore sp
@@ -822,7 +822,7 @@ END art_quick_invoke_static_stub
.extern artHandleFillArrayDataFromCode
ENTRY art_quick_handle_fill_data
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case exception allocation triggers GC
- lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ ld $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
jal artHandleFillArrayDataFromCode # (payload offset, Array*, method, Thread*)
move $a3, rSELF # pass Thread::Current
RETURN_IF_ZERO
@@ -970,7 +970,7 @@ END art_quick_aput_obj
.extern artGetBooleanStaticFromCode
ENTRY art_quick_get_boolean_static
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- lwu $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ ld $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
jal artGetBooleanStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
move $a2, rSELF # pass Thread::Current
RETURN_IF_NO_EXCEPTION
@@ -982,7 +982,7 @@ END art_quick_get_boolean_static
.extern artGetByteStaticFromCode
ENTRY art_quick_get_byte_static
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- lwu $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ ld $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
jal artGetByteStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
move $a2, rSELF # pass Thread::Current
RETURN_IF_NO_EXCEPTION
@@ -994,7 +994,7 @@ END art_quick_get_byte_static
.extern artGetCharStaticFromCode
ENTRY art_quick_get_char_static
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- lwu $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ ld $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
jal artGetCharStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
move $a2, rSELF # pass Thread::Current
RETURN_IF_NO_EXCEPTION
@@ -1006,7 +1006,7 @@ END art_quick_get_char_static
.extern artGetShortStaticFromCode
ENTRY art_quick_get_short_static
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- lwu $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ ld $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
jal artGetShortStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
move $a2, rSELF # pass Thread::Current
RETURN_IF_NO_EXCEPTION
@@ -1018,7 +1018,7 @@ END art_quick_get_short_static
.extern artGet32StaticFromCode
ENTRY art_quick_get32_static
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- lwu $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ ld $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
jal artGet32StaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
move $a2, rSELF # pass Thread::Current
RETURN_IF_NO_EXCEPTION
@@ -1030,7 +1030,7 @@ END art_quick_get32_static
.extern artGet64StaticFromCode
ENTRY art_quick_get64_static
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- lwu $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ ld $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
jal artGet64StaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
move $a2, rSELF # pass Thread::Current
RETURN_IF_NO_EXCEPTION
@@ -1042,7 +1042,7 @@ END art_quick_get64_static
.extern artGetObjStaticFromCode
ENTRY art_quick_get_obj_static
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- lwu $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ ld $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
jal artGetObjStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
move $a2, rSELF # pass Thread::Current
RETURN_IF_NO_EXCEPTION
@@ -1054,7 +1054,7 @@ END art_quick_get_obj_static
.extern artGetBooleanInstanceFromCode
ENTRY art_quick_get_boolean_instance
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ ld $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
jal artGetBooleanInstanceFromCode # (field_idx, Object*, referrer, Thread*)
move $a3, rSELF # pass Thread::Current
RETURN_IF_NO_EXCEPTION
@@ -1066,7 +1066,7 @@ END art_quick_get_boolean_instance
.extern artGetByteInstanceFromCode
ENTRY art_quick_get_byte_instance
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ ld $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
jal artGetByteInstanceFromCode # (field_idx, Object*, referrer, Thread*)
move $a3, rSELF # pass Thread::Current
RETURN_IF_NO_EXCEPTION
@@ -1078,7 +1078,7 @@ END art_quick_get_byte_instance
.extern artGetCharInstanceFromCode
ENTRY art_quick_get_char_instance
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ ld $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
jal artGetCharInstanceFromCode # (field_idx, Object*, referrer, Thread*)
move $a3, rSELF # pass Thread::Current
RETURN_IF_NO_EXCEPTION
@@ -1090,7 +1090,7 @@ END art_quick_get_char_instance
.extern artGetShortInstanceFromCode
ENTRY art_quick_get_short_instance
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ ld $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
jal artGetShortInstanceFromCode # (field_idx, Object*, referrer, Thread*)
move $a3, rSELF # pass Thread::Current
RETURN_IF_NO_EXCEPTION
@@ -1102,7 +1102,7 @@ END art_quick_get_short_instance
.extern artGet32InstanceFromCode
ENTRY art_quick_get32_instance
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ ld $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
jal artGet32InstanceFromCode # (field_idx, Object*, referrer, Thread*)
move $a3, rSELF # pass Thread::Current
RETURN_IF_NO_EXCEPTION
@@ -1114,7 +1114,7 @@ END art_quick_get32_instance
.extern artGet64InstanceFromCode
ENTRY art_quick_get64_instance
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ ld $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
jal artGet64InstanceFromCode # (field_idx, Object*, referrer, Thread*)
move $a3, rSELF # pass Thread::Current
RETURN_IF_NO_EXCEPTION
@@ -1126,7 +1126,7 @@ END art_quick_get64_instance
.extern artGetObjInstanceFromCode
ENTRY art_quick_get_obj_instance
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ ld $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
jal artGetObjInstanceFromCode # (field_idx, Object*, referrer, Thread*)
move $a3, rSELF # pass Thread::Current
RETURN_IF_NO_EXCEPTION
@@ -1138,7 +1138,7 @@ END art_quick_get_obj_instance
.extern artSet8StaticFromCode
ENTRY art_quick_set8_static
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ ld $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
jal artSet8StaticFromCode # (field_idx, new_val, referrer, Thread*)
move $a3, rSELF # pass Thread::Current
RETURN_IF_ZERO
@@ -1150,7 +1150,7 @@ END art_quick_set8_static
.extern artSet16StaticFromCode
ENTRY art_quick_set16_static
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ ld $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
jal artSet16StaticFromCode # (field_idx, new_val, referrer, Thread*)
move $a3, rSELF # pass Thread::Current
RETURN_IF_ZERO
@@ -1162,7 +1162,7 @@ END art_quick_set16_static
.extern artSet32StaticFromCode
ENTRY art_quick_set32_static
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ ld $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
jal artSet32StaticFromCode # (field_idx, new_val, referrer, Thread*)
move $a3, rSELF # pass Thread::Current
RETURN_IF_ZERO
@@ -1175,7 +1175,7 @@ END art_quick_set32_static
ENTRY art_quick_set64_static
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
move $a2, $a1 # pass new_val
- lwu $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ ld $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
jal artSet64StaticFromCode # (field_idx, referrer, new_val, Thread*)
move $a3, rSELF # pass Thread::Current
RETURN_IF_ZERO
@@ -1187,7 +1187,7 @@ END art_quick_set64_static
.extern artSetObjStaticFromCode
ENTRY art_quick_set_obj_static
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ ld $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
jal artSetObjStaticFromCode # (field_idx, new_val, referrer, Thread*)
move $a3, rSELF # pass Thread::Current
RETURN_IF_ZERO
@@ -1199,7 +1199,7 @@ END art_quick_set_obj_static
.extern artSet8InstanceFromCode
ENTRY art_quick_set8_instance
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- lwu $a3, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ ld $a3, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
jal artSet8InstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*)
move $a4, rSELF # pass Thread::Current
RETURN_IF_ZERO
@@ -1211,7 +1211,7 @@ END art_quick_set8_instance
.extern artSet16InstanceFromCode
ENTRY art_quick_set16_instance
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- lwu $a3, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ ld $a3, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
jal artSet16InstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*)
move $a4, rSELF # pass Thread::Current
RETURN_IF_ZERO
@@ -1223,7 +1223,7 @@ END art_quick_set16_instance
.extern artSet32InstanceFromCode
ENTRY art_quick_set32_instance
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- lwu $a3, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ ld $a3, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
jal artSet32InstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*)
move $a4, rSELF # pass Thread::Current
RETURN_IF_ZERO
@@ -1235,7 +1235,7 @@ END art_quick_set32_instance
.extern artSet64InstanceFromCode
ENTRY art_quick_set64_instance
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- lwu $a3, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ ld $a3, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
jal artSet64InstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*)
move $a4, rSELF # pass Thread::Current
RETURN_IF_ZERO
@@ -1247,7 +1247,7 @@ END art_quick_set64_instance
.extern artSetObjInstanceFromCode
ENTRY art_quick_set_obj_instance
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- lwu $a3, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ ld $a3, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
jal artSetObjInstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*)
move $a4, rSELF # pass Thread::Current
RETURN_IF_ZERO
@@ -1366,14 +1366,14 @@ END art_quick_proxy_invoke_handler
* dex method index.
*/
ENTRY art_quick_imt_conflict_trampoline
- lwu $a0, 0($sp) # load caller Method*
- lwu $a0, MIRROR_ART_METHOD_DEX_CACHE_METHODS_OFFSET($a0) # load dex_cache_resolved_methods
- dsll $t0, 2 # convert target method offset to bytes
+ ld $a0, 0($sp) # load caller Method*
+ lwu $a0, ART_METHOD_DEX_CACHE_METHODS_OFFSET($a0) # load dex_cache_resolved_methods
+ dsll $t0, 3 # convert target method offset to bytes
daddu $a0, $t0 # get address of target method
dla $t9, art_quick_invoke_interface_trampoline
.cpreturn
jalr $zero, $t9
- lwu $a0, MIRROR_OBJECT_ARRAY_DATA_OFFSET($a0) # load the target method
+ lwu $a0, MIRROR_LONG_ARRAY_DATA_OFFSET($a0) # load the target method
END art_quick_imt_conflict_trampoline
.extern artQuickResolutionTrampoline
@@ -1383,7 +1383,7 @@ ENTRY art_quick_resolution_trampoline
jal artQuickResolutionTrampoline # (Method* called, receiver, Thread*, SP)
move $a3, $sp # pass $sp
beq $v0, $zero, 1f
- lwu $a0, 0($sp) # load resolved method in $a0
+ ld $a0, 0($sp) # load resolved method in $a0
# artQuickResolutionTrampoline puts resolved method in *SP
RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
move $t9, $v0 # code pointer must be in $t9 to generate the global pointer
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index a7d24b8..13acaa7 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -17,10 +17,10 @@
#include <cstdio>
#include "art_field-inl.h"
+#include "art_method-inl.h"
#include "class_linker-inl.h"
#include "common_runtime_test.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
-#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/string-inl.h"
#include "scoped_thread_state_change.h"
@@ -70,7 +70,7 @@ class StubTest : public CommonRuntimeTest {
// TODO: Set up a frame according to referrer's specs.
size_t Invoke3WithReferrer(size_t arg0, size_t arg1, size_t arg2, uintptr_t code, Thread* self,
- mirror::ArtMethod* referrer) {
+ ArtMethod* referrer) {
// Push a transition back into managed code onto the linked list in thread.
ManagedStack fragment;
self->PushManagedStackFragment(&fragment);
@@ -420,7 +420,7 @@ class StubTest : public CommonRuntimeTest {
// TODO: Set up a frame according to referrer's specs.
size_t Invoke3WithReferrerAndHidden(size_t arg0, size_t arg1, size_t arg2, uintptr_t code,
- Thread* self, mirror::ArtMethod* referrer, size_t hidden) {
+ Thread* self, ArtMethod* referrer, size_t hidden) {
// Push a transition back into managed code onto the linked list in thread.
ManagedStack fragment;
self->PushManagedStackFragment(&fragment);
@@ -776,7 +776,7 @@ class StubTest : public CommonRuntimeTest {
// Method with 32b arg0, 64b arg1
size_t Invoke3UWithReferrer(size_t arg0, uint64_t arg1, uintptr_t code, Thread* self,
- mirror::ArtMethod* referrer) {
+ ArtMethod* referrer) {
#if (defined(__x86_64__) && !defined(__APPLE__)) || (defined(__mips__) && defined(__LP64__)) || \
defined(__aarch64__)
// Just pass through.
@@ -1282,7 +1282,8 @@ TEST_F(StubTest, AllocObject) {
{
// Use an arbitrary method from c to use as referrer
size_t result = Invoke3(static_cast<size_t>(c->GetDexTypeIndex()), // type_idx
- reinterpret_cast<size_t>(c->GetVirtualMethod(0)), // arbitrary
+ // arbitrary
+ reinterpret_cast<size_t>(c->GetVirtualMethod(0, sizeof(void*))),
0U,
StubTest::GetEntrypoint(self, kQuickAllocObject),
self);
@@ -1297,7 +1298,7 @@ TEST_F(StubTest, AllocObject) {
{
// We can use null in the second argument as we do not need a method here (not used in
// resolved/initialized cases)
- size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), reinterpret_cast<size_t>(nullptr), 0U,
+ size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 0u, 0U,
StubTest::GetEntrypoint(self, kQuickAllocObjectResolved),
self);
@@ -1311,7 +1312,7 @@ TEST_F(StubTest, AllocObject) {
{
// We can use null in the second argument as we do not need a method here (not used in
// resolved/initialized cases)
- size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), reinterpret_cast<size_t>(nullptr), 0U,
+ size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 0u, 0U,
StubTest::GetEntrypoint(self, kQuickAllocObjectInitialized),
self);
@@ -1367,7 +1368,7 @@ TEST_F(StubTest, AllocObject) {
}
self->ClearException();
- size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), reinterpret_cast<size_t>(nullptr), 0U,
+ size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 0u, 0U,
StubTest::GetEntrypoint(self, kQuickAllocObjectInitialized),
self);
EXPECT_TRUE(self->IsExceptionPending());
@@ -1417,7 +1418,8 @@ TEST_F(StubTest, AllocObjectArray) {
// Use an arbitrary method from c to use as referrer
size_t result = Invoke3(static_cast<size_t>(c->GetDexTypeIndex()), // type_idx
10U,
- reinterpret_cast<size_t>(c_obj->GetVirtualMethod(0)), // arbitrary
+ // arbitrary
+ reinterpret_cast<size_t>(c_obj->GetVirtualMethod(0, sizeof(void*))),
StubTest::GetEntrypoint(self, kQuickAllocArray),
self);
@@ -1554,7 +1556,7 @@ TEST_F(StubTest, StringCompareTo) {
static void GetSetBooleanStatic(ArtField* f, Thread* self,
- mirror::ArtMethod* referrer, StubTest* test)
+ ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
@@ -1584,7 +1586,7 @@ static void GetSetBooleanStatic(ArtField* f, Thread* self,
std::cout << "Skipping set_boolean_static as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}
-static void GetSetByteStatic(ArtField* f, Thread* self, mirror::ArtMethod* referrer,
+static void GetSetByteStatic(ArtField* f, Thread* self, ArtMethod* referrer,
StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
@@ -1616,7 +1618,7 @@ static void GetSetByteStatic(ArtField* f, Thread* self, mirror::ArtMethod* refer
static void GetSetBooleanInstance(Handle<mirror::Object>* obj, ArtField* f, Thread* self,
- mirror::ArtMethod* referrer, StubTest* test)
+ ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
@@ -1651,7 +1653,7 @@ static void GetSetBooleanInstance(Handle<mirror::Object>* obj, ArtField* f, Thre
#endif
}
static void GetSetByteInstance(Handle<mirror::Object>* obj, ArtField* f,
- Thread* self, mirror::ArtMethod* referrer, StubTest* test)
+ Thread* self, ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
@@ -1685,7 +1687,7 @@ static void GetSetByteInstance(Handle<mirror::Object>* obj, ArtField* f,
#endif
}
-static void GetSetCharStatic(ArtField* f, Thread* self, mirror::ArtMethod* referrer,
+static void GetSetCharStatic(ArtField* f, Thread* self, ArtMethod* referrer,
StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
@@ -1716,7 +1718,7 @@ static void GetSetCharStatic(ArtField* f, Thread* self, mirror::ArtMethod* refer
#endif
}
static void GetSetShortStatic(ArtField* f, Thread* self,
- mirror::ArtMethod* referrer, StubTest* test)
+ ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
@@ -1747,7 +1749,7 @@ static void GetSetShortStatic(ArtField* f, Thread* self,
}
static void GetSetCharInstance(Handle<mirror::Object>* obj, ArtField* f,
- Thread* self, mirror::ArtMethod* referrer, StubTest* test)
+ Thread* self, ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
@@ -1781,7 +1783,7 @@ static void GetSetCharInstance(Handle<mirror::Object>* obj, ArtField* f,
#endif
}
static void GetSetShortInstance(Handle<mirror::Object>* obj, ArtField* f,
- Thread* self, mirror::ArtMethod* referrer, StubTest* test)
+ Thread* self, ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
@@ -1815,7 +1817,7 @@ static void GetSetShortInstance(Handle<mirror::Object>* obj, ArtField* f,
#endif
}
-static void GetSet32Static(ArtField* f, Thread* self, mirror::ArtMethod* referrer,
+static void GetSet32Static(ArtField* f, Thread* self, ArtMethod* referrer,
StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
@@ -1852,7 +1854,7 @@ static void GetSet32Static(ArtField* f, Thread* self, mirror::ArtMethod* referre
static void GetSet32Instance(Handle<mirror::Object>* obj, ArtField* f,
- Thread* self, mirror::ArtMethod* referrer, StubTest* test)
+ Thread* self, ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
@@ -1893,7 +1895,7 @@ static void GetSet32Instance(Handle<mirror::Object>* obj, ArtField* f,
(defined(__x86_64__) && !defined(__APPLE__))
static void set_and_check_static(uint32_t f_idx, mirror::Object* val, Thread* self,
- mirror::ArtMethod* referrer, StubTest* test)
+ ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
test->Invoke3WithReferrer(static_cast<size_t>(f_idx),
reinterpret_cast<size_t>(val),
@@ -1912,7 +1914,7 @@ static void set_and_check_static(uint32_t f_idx, mirror::Object* val, Thread* se
}
#endif
-static void GetSetObjStatic(ArtField* f, Thread* self, mirror::ArtMethod* referrer,
+static void GetSetObjStatic(ArtField* f, Thread* self, ArtMethod* referrer,
StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
@@ -1936,7 +1938,7 @@ static void GetSetObjStatic(ArtField* f, Thread* self, mirror::ArtMethod* referr
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
static void set_and_check_instance(ArtField* f, mirror::Object* trg,
- mirror::Object* val, Thread* self, mirror::ArtMethod* referrer,
+ mirror::Object* val, Thread* self, ArtMethod* referrer,
StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
@@ -1960,7 +1962,7 @@ static void set_and_check_instance(ArtField* f, mirror::Object* trg,
#endif
static void GetSetObjInstance(Handle<mirror::Object>* obj, ArtField* f,
- Thread* self, mirror::ArtMethod* referrer, StubTest* test)
+ Thread* self, ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
@@ -1982,7 +1984,7 @@ static void GetSetObjInstance(Handle<mirror::Object>* obj, ArtField* f,
// TODO: Complete these tests for 32b architectures.
-static void GetSet64Static(ArtField* f, Thread* self, mirror::ArtMethod* referrer,
+static void GetSet64Static(ArtField* f, Thread* self, ArtMethod* referrer,
StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if (defined(__x86_64__) && !defined(__APPLE__)) || (defined(__mips__) && defined(__LP64__)) || \
@@ -2014,7 +2016,7 @@ static void GetSet64Static(ArtField* f, Thread* self, mirror::ArtMethod* referre
static void GetSet64Instance(Handle<mirror::Object>* obj, ArtField* f,
- Thread* self, mirror::ArtMethod* referrer, StubTest* test)
+ Thread* self, ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if (defined(__x86_64__) && !defined(__APPLE__)) || (defined(__mips__) && defined(__LP64__)) || \
defined(__aarch64__)
@@ -2060,11 +2062,11 @@ static void TestFields(Thread* self, StubTest* test, Primitive::Type test_type)
CHECK(o != nullptr);
ScopedObjectAccess soa(self);
- StackHandleScope<4> hs(self);
+ StackHandleScope<3> hs(self);
Handle<mirror::Object> obj(hs.NewHandle(soa.Decode<mirror::Object*>(o)));
Handle<mirror::Class> c(hs.NewHandle(obj->GetClass()));
// Need a method as a referrer
- Handle<mirror::ArtMethod> m(hs.NewHandle(c->GetDirectMethod(0)));
+ ArtMethod* m = c->GetDirectMethod(0, sizeof(void*));
// Play with it...
@@ -2079,27 +2081,27 @@ static void TestFields(Thread* self, StubTest* test, Primitive::Type test_type)
}
switch (type) {
case Primitive::Type::kPrimBoolean:
- GetSetBooleanStatic(f, self, m.Get(), test);
+ GetSetBooleanStatic(f, self, m, test);
break;
case Primitive::Type::kPrimByte:
- GetSetByteStatic(f, self, m.Get(), test);
+ GetSetByteStatic(f, self, m, test);
break;
case Primitive::Type::kPrimChar:
- GetSetCharStatic(f, self, m.Get(), test);
+ GetSetCharStatic(f, self, m, test);
break;
case Primitive::Type::kPrimShort:
- GetSetShortStatic(f, self, m.Get(), test);
+ GetSetShortStatic(f, self, m, test);
break;
case Primitive::Type::kPrimInt:
- GetSet32Static(f, self, m.Get(), test);
+ GetSet32Static(f, self, m, test);
break;
case Primitive::Type::kPrimLong:
- GetSet64Static(f, self, m.Get(), test);
+ GetSet64Static(f, self, m, test);
break;
case Primitive::Type::kPrimNot:
// Don't try array.
if (f->GetTypeDescriptor()[0] != '[') {
- GetSetObjStatic(f, self, m.Get(), test);
+ GetSetObjStatic(f, self, m, test);
}
break;
default:
@@ -2118,27 +2120,27 @@ static void TestFields(Thread* self, StubTest* test, Primitive::Type test_type)
}
switch (type) {
case Primitive::Type::kPrimBoolean:
- GetSetBooleanInstance(&obj, f, self, m.Get(), test);
+ GetSetBooleanInstance(&obj, f, self, m, test);
break;
case Primitive::Type::kPrimByte:
- GetSetByteInstance(&obj, f, self, m.Get(), test);
+ GetSetByteInstance(&obj, f, self, m, test);
break;
case Primitive::Type::kPrimChar:
- GetSetCharInstance(&obj, f, self, m.Get(), test);
+ GetSetCharInstance(&obj, f, self, m, test);
break;
case Primitive::Type::kPrimShort:
- GetSetShortInstance(&obj, f, self, m.Get(), test);
+ GetSetShortInstance(&obj, f, self, m, test);
break;
case Primitive::Type::kPrimInt:
- GetSet32Instance(&obj, f, self, m.Get(), test);
+ GetSet32Instance(&obj, f, self, m, test);
break;
case Primitive::Type::kPrimLong:
- GetSet64Instance(&obj, f, self, m.Get(), test);
+ GetSet64Instance(&obj, f, self, m, test);
break;
case Primitive::Type::kPrimNot:
// Don't try array.
if (f->GetTypeDescriptor()[0] != '[') {
- GetSetObjInstance(&obj, f, self, m.Get(), test);
+ GetSetObjInstance(&obj, f, self, m, test);
}
break;
default:
@@ -2235,17 +2237,18 @@ TEST_F(StubTest, IMT) {
ASSERT_NE(nullptr, arraylist_jclass);
jmethodID arraylist_constructor = env->GetMethodID(arraylist_jclass, "<init>", "()V");
ASSERT_NE(nullptr, arraylist_constructor);
- jmethodID contains_jmethod = env->GetMethodID(arraylist_jclass, "contains", "(Ljava/lang/Object;)Z");
+ jmethodID contains_jmethod = env->GetMethodID(
+ arraylist_jclass, "contains", "(Ljava/lang/Object;)Z");
ASSERT_NE(nullptr, contains_jmethod);
jmethodID add_jmethod = env->GetMethodID(arraylist_jclass, "add", "(Ljava/lang/Object;)Z");
ASSERT_NE(nullptr, add_jmethod);
- // Get mirror representation.
- Handle<mirror::ArtMethod> contains_amethod(hs.NewHandle(soa.DecodeMethod(contains_jmethod)));
+ // Get representation.
+ ArtMethod* contains_amethod = soa.DecodeMethod(contains_jmethod);
// Patch up ArrayList.contains.
- if (contains_amethod.Get()->GetEntryPointFromQuickCompiledCode() == nullptr) {
- contains_amethod.Get()->SetEntryPointFromQuickCompiledCode(reinterpret_cast<void*>(
+ if (contains_amethod->GetEntryPointFromQuickCompiledCode() == nullptr) {
+ contains_amethod->SetEntryPointFromQuickCompiledCode(reinterpret_cast<void*>(
StubTest::GetEntrypoint(self, kQuickQuickToInterpreterBridge)));
}
@@ -2254,11 +2257,12 @@ TEST_F(StubTest, IMT) {
// Load List and used methods (JNI).
jclass list_jclass = env->FindClass("java/util/List");
ASSERT_NE(nullptr, list_jclass);
- jmethodID inf_contains_jmethod = env->GetMethodID(list_jclass, "contains", "(Ljava/lang/Object;)Z");
+ jmethodID inf_contains_jmethod = env->GetMethodID(
+ list_jclass, "contains", "(Ljava/lang/Object;)Z");
ASSERT_NE(nullptr, inf_contains_jmethod);
// Get mirror representation.
- Handle<mirror::ArtMethod> inf_contains(hs.NewHandle(soa.DecodeMethod(inf_contains_jmethod)));
+ ArtMethod* inf_contains = soa.DecodeMethod(inf_contains_jmethod);
// Object
@@ -2287,8 +2291,8 @@ TEST_F(StubTest, IMT) {
Invoke3WithReferrerAndHidden(0U, reinterpret_cast<size_t>(array_list.Get()),
reinterpret_cast<size_t>(obj.Get()),
StubTest::GetEntrypoint(self, kQuickQuickImtConflictTrampoline),
- self, contains_amethod.Get(),
- static_cast<size_t>(inf_contains.Get()->GetDexMethodIndex()));
+ self, contains_amethod,
+ static_cast<size_t>(inf_contains->GetDexMethodIndex()));
ASSERT_FALSE(self->IsExceptionPending());
EXPECT_EQ(static_cast<size_t>(JNI_FALSE), result);
@@ -2301,33 +2305,31 @@ TEST_F(StubTest, IMT) {
// Contains.
- result = Invoke3WithReferrerAndHidden(0U, reinterpret_cast<size_t>(array_list.Get()),
- reinterpret_cast<size_t>(obj.Get()),
- StubTest::GetEntrypoint(self, kQuickQuickImtConflictTrampoline),
- self, contains_amethod.Get(),
- static_cast<size_t>(inf_contains.Get()->GetDexMethodIndex()));
+ result = Invoke3WithReferrerAndHidden(
+ 0U, reinterpret_cast<size_t>(array_list.Get()), reinterpret_cast<size_t>(obj.Get()),
+ StubTest::GetEntrypoint(self, kQuickQuickImtConflictTrampoline), self, contains_amethod,
+ static_cast<size_t>(inf_contains->GetDexMethodIndex()));
ASSERT_FALSE(self->IsExceptionPending());
EXPECT_EQ(static_cast<size_t>(JNI_TRUE), result);
// 2. regular interface trampoline
- result = Invoke3WithReferrer(static_cast<size_t>(inf_contains.Get()->GetDexMethodIndex()),
+ result = Invoke3WithReferrer(static_cast<size_t>(inf_contains->GetDexMethodIndex()),
reinterpret_cast<size_t>(array_list.Get()),
reinterpret_cast<size_t>(obj.Get()),
StubTest::GetEntrypoint(self,
kQuickInvokeInterfaceTrampolineWithAccessCheck),
- self, contains_amethod.Get());
+ self, contains_amethod);
ASSERT_FALSE(self->IsExceptionPending());
EXPECT_EQ(static_cast<size_t>(JNI_TRUE), result);
- result = Invoke3WithReferrer(static_cast<size_t>(inf_contains.Get()->GetDexMethodIndex()),
- reinterpret_cast<size_t>(array_list.Get()),
- reinterpret_cast<size_t>(array_list.Get()),
- StubTest::GetEntrypoint(self,
- kQuickInvokeInterfaceTrampolineWithAccessCheck),
- self, contains_amethod.Get());
+ result = Invoke3WithReferrer(
+ static_cast<size_t>(inf_contains->GetDexMethodIndex()),
+ reinterpret_cast<size_t>(array_list.Get()), reinterpret_cast<size_t>(array_list.Get()),
+ StubTest::GetEntrypoint(self, kQuickInvokeInterfaceTrampolineWithAccessCheck), self,
+ contains_amethod);
ASSERT_FALSE(self->IsExceptionPending());
EXPECT_EQ(static_cast<size_t>(JNI_FALSE), result);
diff --git a/runtime/arch/x86/context_x86.cc b/runtime/arch/x86/context_x86.cc
index 06bae75..7096c82 100644
--- a/runtime/arch/x86/context_x86.cc
+++ b/runtime/arch/x86/context_x86.cc
@@ -16,8 +16,8 @@
#include "context_x86.h"
+#include "art_method-inl.h"
#include "base/bit_utils.h"
-#include "mirror/art_method-inl.h"
#include "quick/quick_method_frame_info.h"
namespace art {
@@ -35,7 +35,7 @@ void X86Context::Reset() {
}
void X86Context::FillCalleeSaves(const StackVisitor& fr) {
- mirror::ArtMethod* method = fr.GetMethod();
+ ArtMethod* method = fr.GetMethod();
const QuickMethodFrameInfo frame_info = method->GetQuickFrameInfo();
int spill_pos = 0;
diff --git a/runtime/arch/x86/fault_handler_x86.cc b/runtime/arch/x86/fault_handler_x86.cc
index 2de69aa..d7c4cb1 100644
--- a/runtime/arch/x86/fault_handler_x86.cc
+++ b/runtime/arch/x86/fault_handler_x86.cc
@@ -16,13 +16,14 @@
#include "fault_handler.h"
+
#include <sys/ucontext.h>
+
+#include "art_method-inl.h"
#include "base/macros.h"
#include "globals.h"
#include "base/logging.h"
#include "base/hex_dump.h"
-#include "mirror/art_method.h"
-#include "mirror/art_method-inl.h"
#include "thread.h"
#include "thread-inl.h"
@@ -248,7 +249,7 @@ void FaultManager::HandleNestedSignal(int, siginfo_t*, void* context) {
}
void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo, void* context,
- mirror::ArtMethod** out_method,
+ ArtMethod** out_method,
uintptr_t* out_return_pc, uintptr_t* out_sp) {
struct ucontext* uc = reinterpret_cast<struct ucontext*>(context);
*out_sp = static_cast<uintptr_t>(uc->CTX_ESP);
@@ -267,10 +268,10 @@ void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo, void* context,
reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(kX86));
#endif
if (overflow_addr == fault_addr) {
- *out_method = reinterpret_cast<mirror::ArtMethod*>(uc->CTX_METHOD);
+ *out_method = reinterpret_cast<ArtMethod*>(uc->CTX_METHOD);
} else {
// The method is at the top of the stack.
- *out_method = (reinterpret_cast<StackReference<mirror::ArtMethod>* >(*out_sp)[0]).AsMirrorPtr();
+ *out_method = *reinterpret_cast<ArtMethod**>(*out_sp);
}
uint8_t* pc = reinterpret_cast<uint8_t*>(uc->CTX_EIP);
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 6ebeba3..9cebb4e 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -477,7 +477,7 @@ DEFINE_FUNCTION art_quick_invoke_stub
// Nothing left to load.
.Lgpr_setup_finished:
mov 20(%ebp), %eax // move method pointer into eax
- call *MIRROR_ART_METHOD_QUICK_CODE_OFFSET_32(%eax) // call the method
+ call *ART_METHOD_QUICK_CODE_OFFSET_32(%eax) // call the method
mov %ebp, %esp // restore stack pointer
CFI_DEF_CFA_REGISTER(esp)
POP edi // pop edi
@@ -594,7 +594,7 @@ DEFINE_FUNCTION art_quick_invoke_static_stub
// Nothing left to load.
.Lgpr_setup_finished2:
mov 20(%ebp), %eax // move method pointer into eax
- call *MIRROR_ART_METHOD_QUICK_CODE_OFFSET_32(%eax) // call the method
+ call *ART_METHOD_QUICK_CODE_OFFSET_32(%eax) // call the method
mov %ebp, %esp // restore stack pointer
CFI_DEF_CFA_REGISTER(esp)
POP edi // pop edi
@@ -1396,7 +1396,7 @@ END_FUNCTION art_quick_proxy_invoke_handler
DEFINE_FUNCTION art_quick_imt_conflict_trampoline
PUSH ecx
movl 8(%esp), %eax // load caller Method*
- movl MIRROR_ART_METHOD_DEX_CACHE_METHODS_OFFSET(%eax), %eax // load dex_cache_resolved_methods
+ movl ART_METHOD_DEX_CACHE_METHODS_OFFSET(%eax), %eax // load dex_cache_resolved_methods
movd %xmm7, %ecx // get target method index stored in xmm0
movl MIRROR_OBJECT_ARRAY_DATA_OFFSET(%eax, %ecx, 4), %eax // load the target method
POP ecx
diff --git a/runtime/arch/x86_64/context_x86_64.cc b/runtime/arch/x86_64/context_x86_64.cc
index 2c4532c..1fe2ef8 100644
--- a/runtime/arch/x86_64/context_x86_64.cc
+++ b/runtime/arch/x86_64/context_x86_64.cc
@@ -16,8 +16,8 @@
#include "context_x86_64.h"
+#include "art_method-inl.h"
#include "base/bit_utils.h"
-#include "mirror/art_method-inl.h"
#include "quick/quick_method_frame_info.h"
namespace art {
@@ -35,7 +35,7 @@ void X86_64Context::Reset() {
}
void X86_64Context::FillCalleeSaves(const StackVisitor& fr) {
- mirror::ArtMethod* method = fr.GetMethod();
+ ArtMethod* method = fr.GetMethod();
const QuickMethodFrameInfo frame_info = method->GetQuickFrameInfo();
int spill_pos = 0;
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index da4d92b..bd199db 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -57,7 +57,7 @@ MACRO0(SETUP_SAVE_ALL_CALLEE_SAVE_FRAME)
PUSH r12 // Callee save.
PUSH rbp // Callee save.
PUSH rbx // Callee save.
- // Create space for FPR args, plus space for StackReference<ArtMethod>.
+ // Create space for FPR args, plus space for ArtMethod*.
subq MACRO_LITERAL(4 * 8 + 8), %rsp
CFI_ADJUST_CFA_OFFSET(4 * 8 + 8)
// Save FPRs.
@@ -67,7 +67,7 @@ MACRO0(SETUP_SAVE_ALL_CALLEE_SAVE_FRAME)
movq %xmm15, 32(%rsp)
// R10 := ArtMethod* for save all callee save frame method.
THIS_LOAD_REQUIRES_READ_BARRIER
- movl RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET(%r10), %r10d
+ movq RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET(%r10), %r10
// Store ArtMethod* to bottom of stack.
movq %r10, 0(%rsp)
// Store rsp as the top quick frame.
@@ -100,7 +100,7 @@ MACRO0(SETUP_REFS_ONLY_CALLEE_SAVE_FRAME)
PUSH r12 // Callee save.
PUSH rbp // Callee save.
PUSH rbx // Callee save.
- // Create space for FPR args, plus space for StackReference<ArtMethod>.
+ // Create space for FPR args, plus space for ArtMethod*.
subq LITERAL(8 + 4 * 8), %rsp
CFI_ADJUST_CFA_OFFSET(8 + 4 * 8)
// Save FPRs.
@@ -110,7 +110,7 @@ MACRO0(SETUP_REFS_ONLY_CALLEE_SAVE_FRAME)
movq %xmm15, 32(%rsp)
// R10 := ArtMethod* for refs only callee save frame method.
THIS_LOAD_REQUIRES_READ_BARRIER
- movl RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET(%r10), %r10d
+ movq RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET(%r10), %r10
// Store ArtMethod* to bottom of stack.
movq %r10, 0(%rsp)
// Store rsp as the stop quick frame.
@@ -164,13 +164,12 @@ MACRO0(SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME)
PUSH rbx // Callee save.
PUSH rdx // Quick arg 2.
PUSH rcx // Quick arg 3.
- // Create space for FPR args and create 2 slots, 1 of padding and 1 for the
- // StackReference<ArtMethod>.
+ // Create space for FPR args and create 2 slots for ArtMethod*.
subq MACRO_LITERAL(80 + 4 * 8), %rsp
CFI_ADJUST_CFA_OFFSET(80 + 4 * 8)
// R10 := ArtMethod* for ref and args callee save frame method.
THIS_LOAD_REQUIRES_READ_BARRIER
- movl RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET(%r10), %r10d
+ movq RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET(%r10), %r10
// Save FPRs.
movq %xmm0, 16(%rsp)
movq %xmm1, 24(%rsp)
@@ -210,8 +209,7 @@ MACRO0(SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_RDI)
PUSH rbx // Callee save.
PUSH rdx // Quick arg 2.
PUSH rcx // Quick arg 3.
- // Create space for FPR args and create 2 slots, 1 of padding and 1 for the
- // StackReference<ArtMethod>.
+ // Create space for FPR args and create 2 slots for ArtMethod*.
subq LITERAL(80 + 4 * 8), %rsp
CFI_ADJUST_CFA_OFFSET(80 + 4 * 8)
// Save FPRs.
@@ -362,7 +360,7 @@ MACRO2(INVOKE_TRAMPOLINE, c_name, cxx_name)
// Helper signature is always
// (method_idx, *this_object, *caller_method, *self, sp)
- movl FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE(%rsp), %edx // pass caller Method*
+ movq FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE(%rsp), %rdx // pass caller Method*
movq %gs:THREAD_SELF_OFFSET, %rcx // pass Thread
movq %rsp, %r8 // pass SP
@@ -506,13 +504,13 @@ DEFINE_FUNCTION art_quick_invoke_stub
#if (STACK_REFERENCE_SIZE != 4)
#error "STACK_REFERENCE_SIZE(X86_64) size not as expected."
#endif
- movl LITERAL(0), (%rsp) // Store null for method*
+ movq LITERAL(0), (%rsp) // Store null for method*
movl %r10d, %ecx // Place size of args in rcx.
movq %rdi, %rax // rax := method to be called
movq %rsi, %r11 // r11 := arg_array
- leaq 4(%rsp), %rdi // rdi is pointing just above the StackReference<method> in the
- // stack arguments.
+ leaq 8(%rsp), %rdi // rdi is pointing just above the ArtMethod* in the stack
+ // arguments.
// Copy arg array into stack.
rep movsb // while (rcx--) { *rdi++ = *rsi++ }
leaq 1(%r9), %r10 // r10 := shorty + 1 ; ie skip return arg character
@@ -524,7 +522,7 @@ DEFINE_FUNCTION art_quick_invoke_stub
LOOP_OVER_SHORTY_LOADING_GPRS r8, r8d, .Lgpr_setup_finished
LOOP_OVER_SHORTY_LOADING_GPRS r9, r9d, .Lgpr_setup_finished
.Lgpr_setup_finished:
- call *MIRROR_ART_METHOD_QUICK_CODE_OFFSET_64(%rdi) // Call the method.
+ call *ART_METHOD_QUICK_CODE_OFFSET_64(%rdi) // Call the method.
movq %rbp, %rsp // Restore stack pointer.
POP r15 // Pop r15
POP r14 // Pop r14
@@ -600,12 +598,12 @@ DEFINE_FUNCTION art_quick_invoke_static_stub
#if (STACK_REFERENCE_SIZE != 4)
#error "STACK_REFERENCE_SIZE(X86_64) size not as expected."
#endif
- movl LITERAL(0), (%rsp) // Store null for method*
+ movq LITERAL(0), (%rsp) // Store null for method*
movl %r10d, %ecx // Place size of args in rcx.
movq %rdi, %rax // rax := method to be called
movq %rsi, %r11 // r11 := arg_array
- leaq 4(%rsp), %rdi // rdi is pointing just above the StackReference<method> in the
+ leaq 8(%rsp), %rdi // rdi is pointing just above the ArtMethod* in the
// stack arguments.
// Copy arg array into stack.
rep movsb // while (rcx--) { *rdi++ = *rsi++ }
@@ -617,7 +615,7 @@ DEFINE_FUNCTION art_quick_invoke_static_stub
LOOP_OVER_SHORTY_LOADING_GPRS r8, r8d, .Lgpr_setup_finished2
LOOP_OVER_SHORTY_LOADING_GPRS r9, r9d, .Lgpr_setup_finished2
.Lgpr_setup_finished2:
- call *MIRROR_ART_METHOD_QUICK_CODE_OFFSET_64(%rdi) // Call the method.
+ call *ART_METHOD_QUICK_CODE_OFFSET_64(%rdi) // Call the method.
movq %rbp, %rsp // Restore stack pointer.
POP r15 // Pop r15
POP r14 // Pop r14
@@ -751,7 +749,7 @@ END_MACRO
MACRO3(ONE_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name, 0)
- movl 8(%rsp), %esi // pass referrer
+ movq 8(%rsp), %rsi // pass referrer
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
// arg0 is in rdi
movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
@@ -763,7 +761,7 @@ END_MACRO
MACRO3(TWO_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name, 0)
- movl 8(%rsp), %edx // pass referrer
+ movq 8(%rsp), %rdx // pass referrer
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
// arg0 and arg1 are in rdi/rsi
movq %gs:THREAD_SELF_OFFSET, %rcx // pass Thread::Current()
@@ -775,7 +773,7 @@ END_MACRO
MACRO3(THREE_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name, 0)
- movl 8(%rsp), %ecx // pass referrer
+ movq 8(%rsp), %rcx // pass referrer
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
// arg0, arg1, and arg2 are in rdi/rsi/rdx
movq %gs:THREAD_SELF_OFFSET, %r8 // pass Thread::Current()
@@ -922,7 +920,7 @@ DEFINE_FUNCTION art_quick_alloc_object_tlab
// Fast path tlab allocation.
// RDI: uint32_t type_idx, RSI: ArtMethod*
// RDX, RCX, R8, R9: free. RAX: return val.
- movl MIRROR_ART_METHOD_DEX_CACHE_TYPES_OFFSET(%rsi), %edx // Load dex cache resolved types array
+ movl ART_METHOD_DEX_CACHE_TYPES_OFFSET(%rsi), %edx // Load dex cache resolved types array
// Load the class
movl MIRROR_OBJECT_ARRAY_DATA_OFFSET(%rdx, %rdi, MIRROR_OBJECT_ARRAY_COMPONENT_SIZE), %edx
testl %edx, %edx // Check null class
@@ -1309,7 +1307,7 @@ ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCode, RETURN_O
// This is singled out as the argument order is different.
DEFINE_FUNCTION art_quick_set64_static
movq %rsi, %rdx // pass new_val
- movl 8(%rsp), %esi // pass referrer
+ movq 8(%rsp), %rsi // pass referrer
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
// field_idx is in rdi
movq %gs:THREAD_SELF_OFFSET, %rcx // pass Thread::Current()
@@ -1340,9 +1338,9 @@ DEFINE_FUNCTION art_quick_imt_conflict_trampoline
int3
int3
#else
- movl 8(%rsp), %edi // load caller Method*
- movl MIRROR_ART_METHOD_DEX_CACHE_METHODS_OFFSET(%rdi), %edi // load dex_cache_resolved_methods
- movl MIRROR_OBJECT_ARRAY_DATA_OFFSET(%rdi, %rax, 4), %edi // load the target method
+ movq 8(%rsp), %rdi // load caller Method*
+ movl ART_METHOD_DEX_CACHE_METHODS_OFFSET(%rdi), %edi // load dex_cache_resolved_methods
+ movq MIRROR_LONG_ARRAY_DATA_OFFSET(%rdi, %rax, 8), %rdi // load the target method
jmp art_quick_invoke_interface_trampoline
#endif // __APPLE__
END_FUNCTION art_quick_imt_conflict_trampoline
@@ -1395,7 +1393,6 @@ END_FUNCTION art_quick_resolution_trampoline
* | XMM2 | float arg 3
* | XMM1 | float arg 2
* | XMM0 | float arg 1
- * | Padding |
* | RDI/Method* | <- sp
* #-------------------#
* | Scratch Alloca | 5K scratch space
diff --git a/runtime/art_field-inl.h b/runtime/art_field-inl.h
index 4991ad7..ee51ec9 100644
--- a/runtime/art_field-inl.h
+++ b/runtime/art_field-inl.h
@@ -328,6 +328,11 @@ inline mirror::String* ArtField::GetStringName(Thread* self, bool resolve) {
return name;
}
+template<typename RootVisitorType>
+inline void ArtField::VisitRoots(RootVisitorType& visitor) {
+ visitor.VisitRoot(declaring_class_.AddressWithoutBarrier());
+}
+
} // namespace art
#endif // ART_RUNTIME_ART_FIELD_INL_H_
diff --git a/runtime/art_field.cc b/runtime/art_field.cc
index 47d5a76..e4a5834 100644
--- a/runtime/art_field.cc
+++ b/runtime/art_field.cc
@@ -20,6 +20,7 @@
#include "class_linker-inl.h"
#include "gc/accounting/card_table-inl.h"
#include "handle_scope.h"
+#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "runtime.h"
@@ -46,10 +47,6 @@ void ArtField::SetOffset(MemberOffset num_bytes) {
offset_ = num_bytes.Uint32Value();
}
-void ArtField::VisitRoots(RootVisitor* visitor) {
- declaring_class_.VisitRoot(visitor, RootInfo(kRootStickyClass));
-}
-
ArtField* ArtField::FindInstanceFieldWithOffset(mirror::Class* klass, uint32_t field_offset) {
DCHECK(klass != nullptr);
auto* instance_fields = klass->GetIFields();
diff --git a/runtime/art_field.h b/runtime/art_field.h
index 9d3dbd9..7a03723 100644
--- a/runtime/art_field.h
+++ b/runtime/art_field.h
@@ -38,7 +38,7 @@ class Object;
class String;
} // namespace mirror
-class ArtField {
+class ArtField FINAL {
public:
ArtField();
@@ -151,8 +151,8 @@ class ArtField {
void SetObj(mirror::Object* object, mirror::Object* new_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void VisitRoots(RootVisitor* visitor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ template<typename RootVisitorType>
+ void VisitRoots(RootVisitorType& visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsVolatile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccVolatile) != 0;
diff --git a/runtime/mirror/art_method-inl.h b/runtime/art_method-inl.h
index 7c8067a..5cfce41 100644
--- a/runtime/mirror/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -14,19 +14,19 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_MIRROR_ART_METHOD_INL_H_
-#define ART_RUNTIME_MIRROR_ART_METHOD_INL_H_
+#ifndef ART_RUNTIME_ART_METHOD_INL_H_
+#define ART_RUNTIME_ART_METHOD_INL_H_
#include "art_method.h"
#include "art_field.h"
-#include "class.h"
-#include "class_linker-inl.h"
-#include "dex_cache.h"
#include "dex_file.h"
#include "dex_file-inl.h"
-#include "object-inl.h"
-#include "object_array.h"
+#include "gc_root-inl.h"
+#include "mirror/class-inl.h"
+#include "mirror/dex_cache.h"
+#include "mirror/object-inl.h"
+#include "mirror/object_array.h"
#include "oat.h"
#include "quick/quick_method_frame_info.h"
#include "read_barrier-inl.h"
@@ -34,73 +34,82 @@
#include "utils.h"
namespace art {
-namespace mirror {
-inline uint32_t ArtMethod::ClassSize() {
- uint32_t vtable_entries = Object::kVTableLength;
- return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0);
+inline mirror::Class* ArtMethod::GetDeclaringClassUnchecked() {
+ return declaring_class_.Read();
}
-template<ReadBarrierOption kReadBarrierOption>
-inline Class* ArtMethod::GetJavaLangReflectArtMethod() {
- DCHECK(!java_lang_reflect_ArtMethod_.IsNull());
- return java_lang_reflect_ArtMethod_.Read<kReadBarrierOption>();
+inline mirror::Class* ArtMethod::GetDeclaringClassNoBarrier() {
+ return declaring_class_.Read<kWithoutReadBarrier>();
}
-inline Class* ArtMethod::GetDeclaringClass() {
- Class* result = GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, declaring_class_));
- DCHECK(result != nullptr) << this;
- DCHECK(result->IsIdxLoaded() || result->IsErroneous()) << this;
+inline mirror::Class* ArtMethod::GetDeclaringClass() {
+ mirror::Class* result = GetDeclaringClassUnchecked();
+ if (kIsDebugBuild) {
+ if (!IsRuntimeMethod()) {
+ CHECK(result != nullptr) << this;
+ CHECK(result->IsIdxLoaded() || result->IsErroneous())
+ << result->GetStatus() << " " << PrettyClass(result);
+ } else {
+ CHECK(result == nullptr) << this;
+ }
+ }
return result;
}
-inline void ArtMethod::SetDeclaringClass(Class *new_declaring_class) {
- SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, declaring_class_),
- new_declaring_class);
+inline void ArtMethod::SetDeclaringClass(mirror::Class* new_declaring_class) {
+ declaring_class_ = GcRoot<mirror::Class>(new_declaring_class);
}
inline uint32_t ArtMethod::GetAccessFlags() {
- DCHECK(GetDeclaringClass()->IsIdxLoaded() || GetDeclaringClass()->IsErroneous());
- return GetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, access_flags_));
+ DCHECK(IsRuntimeMethod() || GetDeclaringClass()->IsIdxLoaded() ||
+ GetDeclaringClass()->IsErroneous());
+ return access_flags_;
}
inline uint16_t ArtMethod::GetMethodIndex() {
- DCHECK(GetDeclaringClass()->IsResolved() || GetDeclaringClass()->IsErroneous());
- return GetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, method_index_));
+ DCHECK(IsRuntimeMethod() || GetDeclaringClass()->IsResolved() ||
+ GetDeclaringClass()->IsErroneous());
+ return method_index_;
}
inline uint16_t ArtMethod::GetMethodIndexDuringLinking() {
- return GetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, method_index_));
+ return method_index_;
}
inline uint32_t ArtMethod::GetDexMethodIndex() {
- DCHECK(GetDeclaringClass()->IsLoaded() || GetDeclaringClass()->IsErroneous());
- return GetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_method_index_));
+ DCHECK(IsRuntimeMethod() || GetDeclaringClass()->IsIdxLoaded() ||
+ GetDeclaringClass()->IsErroneous());
+ return dex_method_index_;
}
-inline ObjectArray<ArtMethod>* ArtMethod::GetDexCacheResolvedMethods() {
- return GetFieldObject<ObjectArray<ArtMethod>>(
- OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_resolved_methods_));
+inline mirror::PointerArray* ArtMethod::GetDexCacheResolvedMethods() {
+ return dex_cache_resolved_methods_.Read();
}
-inline ArtMethod* ArtMethod::GetDexCacheResolvedMethod(uint16_t method_index) {
- ArtMethod* method = GetDexCacheResolvedMethods()->Get(method_index);
- if (method != nullptr && !method->GetDeclaringClass()->IsErroneous()) {
- return method;
- } else {
- return nullptr;
+inline ArtMethod* ArtMethod::GetDexCacheResolvedMethod(uint16_t method_index, size_t ptr_size) {
+ auto* method = GetDexCacheResolvedMethods()->GetElementPtrSize<ArtMethod*>(
+ method_index, ptr_size);
+ if (LIKELY(method != nullptr)) {
+ auto* declaring_class = method->GetDeclaringClass();
+ if (LIKELY(declaring_class == nullptr || !declaring_class->IsErroneous())) {
+ return method;
+ }
}
+ return nullptr;
}
-inline void ArtMethod::SetDexCacheResolvedMethod(uint16_t method_idx, ArtMethod* new_method) {
- GetDexCacheResolvedMethods()->Set<false>(method_idx, new_method);
+inline void ArtMethod::SetDexCacheResolvedMethod(uint16_t method_idx, ArtMethod* new_method,
+ size_t ptr_size) {
+ DCHECK(new_method == nullptr || new_method->GetDeclaringClass() != nullptr);
+ GetDexCacheResolvedMethods()->SetElementPtrSize(method_idx, new_method, ptr_size);
}
inline bool ArtMethod::HasDexCacheResolvedMethods() {
return GetDexCacheResolvedMethods() != nullptr;
}
-inline bool ArtMethod::HasSameDexCacheResolvedMethods(ObjectArray<ArtMethod>* other_cache) {
+inline bool ArtMethod::HasSameDexCacheResolvedMethods(mirror::PointerArray* other_cache) {
return GetDexCacheResolvedMethods() == other_cache;
}
@@ -108,20 +117,15 @@ inline bool ArtMethod::HasSameDexCacheResolvedMethods(ArtMethod* other) {
return GetDexCacheResolvedMethods() == other->GetDexCacheResolvedMethods();
}
-
-inline ObjectArray<Class>* ArtMethod::GetDexCacheResolvedTypes() {
- return GetFieldObject<ObjectArray<Class>>(
- OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_resolved_types_));
+inline mirror::ObjectArray<mirror::Class>* ArtMethod::GetDexCacheResolvedTypes() {
+ return dex_cache_resolved_types_.Read();
}
template <bool kWithCheck>
-inline Class* ArtMethod::GetDexCacheResolvedType(uint32_t type_index) {
- Class* klass;
- if (kWithCheck) {
- klass = GetDexCacheResolvedTypes()->Get(type_index);
- } else {
- klass = GetDexCacheResolvedTypes()->GetWithoutChecks(type_index);
- }
+inline mirror::Class* ArtMethod::GetDexCacheResolvedType(uint32_t type_index) {
+ mirror::Class* klass = kWithCheck ?
+ GetDexCacheResolvedTypes()->Get(type_index) :
+ GetDexCacheResolvedTypes()->GetWithoutChecks(type_index);
return (klass != nullptr && !klass->IsErroneous()) ? klass : nullptr;
}
@@ -129,7 +133,8 @@ inline bool ArtMethod::HasDexCacheResolvedTypes() {
return GetDexCacheResolvedTypes() != nullptr;
}
-inline bool ArtMethod::HasSameDexCacheResolvedTypes(ObjectArray<Class>* other_cache) {
+inline bool ArtMethod::HasSameDexCacheResolvedTypes(
+ mirror::ObjectArray<mirror::Class>* other_cache) {
return GetDexCacheResolvedTypes() == other_cache;
}
@@ -165,7 +170,7 @@ inline bool ArtMethod::CheckIncompatibleClassChange(InvokeType type) {
case kDirect:
return !IsDirect() || IsStatic();
case kVirtual: {
- Class* methods_class = GetDeclaringClass();
+ mirror::Class* methods_class = GetDeclaringClass();
return IsDirect() || (methods_class->IsInterface() && !IsMiranda());
}
case kSuper:
@@ -173,7 +178,7 @@ inline bool ArtMethod::CheckIncompatibleClassChange(InvokeType type) {
// Interface methods cannot be invoked with invoke-super.
return IsConstructor() || IsStatic() || GetDeclaringClass()->IsInterface();
case kInterface: {
- Class* methods_class = GetDeclaringClass();
+ mirror::Class* methods_class = GetDeclaringClass();
return IsDirect() || !(methods_class->IsInterface() || methods_class->IsObjectClass());
}
default:
@@ -237,7 +242,8 @@ inline CodeInfo ArtMethod::GetOptimizedCodeInfo() {
DCHECK(code_pointer != nullptr);
uint32_t offset =
reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].vmap_table_offset_;
- const void* data = reinterpret_cast<const void*>(reinterpret_cast<const uint8_t*>(code_pointer) - offset);
+ const void* data =
+ reinterpret_cast<const void*>(reinterpret_cast<const uint8_t*>(code_pointer) - offset);
return CodeInfo(data);
}
@@ -261,7 +267,7 @@ inline const uint8_t* ArtMethod::GetNativeGcMap(const void* code_pointer, size_t
}
inline bool ArtMethod::IsRuntimeMethod() {
- return GetDexMethodIndex() == DexFile::kDexNoIndex;
+ return dex_method_index_ == DexFile::kDexNoIndex;
}
inline bool ArtMethod::IsCalleeSaveMethod() {
@@ -317,48 +323,48 @@ inline const DexFile* ArtMethod::GetDexFile() {
}
inline const char* ArtMethod::GetDeclaringClassDescriptor() {
- mirror::ArtMethod* method = GetInterfaceMethodIfProxy();
- uint32_t dex_method_idx = method->GetDexMethodIndex();
+ uint32_t dex_method_idx = GetDexMethodIndex();
if (UNLIKELY(dex_method_idx == DexFile::kDexNoIndex)) {
return "<runtime method>";
}
- const DexFile* dex_file = method->GetDexFile();
+ DCHECK(!IsProxyMethod());
+ const DexFile* dex_file = GetDexFile();
return dex_file->GetMethodDeclaringClassDescriptor(dex_file->GetMethodId(dex_method_idx));
}
inline const char* ArtMethod::GetShorty(uint32_t* out_length) {
- mirror::ArtMethod* method = GetInterfaceMethodIfProxy();
- const DexFile* dex_file = method->GetDexFile();
- return dex_file->GetMethodShorty(dex_file->GetMethodId(method->GetDexMethodIndex()), out_length);
+ DCHECK(!IsProxyMethod());
+ const DexFile* dex_file = GetDexFile();
+ return dex_file->GetMethodShorty(dex_file->GetMethodId(GetDexMethodIndex()), out_length);
}
inline const Signature ArtMethod::GetSignature() {
- mirror::ArtMethod* method = GetInterfaceMethodIfProxy();
- uint32_t dex_method_idx = method->GetDexMethodIndex();
+ uint32_t dex_method_idx = GetDexMethodIndex();
if (dex_method_idx != DexFile::kDexNoIndex) {
- const DexFile* dex_file = method->GetDexFile();
+ DCHECK(!IsProxyMethod());
+ const DexFile* dex_file = GetDexFile();
return dex_file->GetMethodSignature(dex_file->GetMethodId(dex_method_idx));
}
return Signature::NoSignature();
}
inline const char* ArtMethod::GetName() {
- mirror::ArtMethod* method = GetInterfaceMethodIfProxy();
- uint32_t dex_method_idx = method->GetDexMethodIndex();
+ uint32_t dex_method_idx = GetDexMethodIndex();
if (LIKELY(dex_method_idx != DexFile::kDexNoIndex)) {
- const DexFile* dex_file = method->GetDexFile();
+ DCHECK(!IsProxyMethod());
+ const DexFile* dex_file = GetDexFile();
return dex_file->GetMethodName(dex_file->GetMethodId(dex_method_idx));
}
- Runtime* runtime = Runtime::Current();
- if (method == runtime->GetResolutionMethod()) {
+ Runtime* const runtime = Runtime::Current();
+ if (this == runtime->GetResolutionMethod()) {
return "<runtime internal resolution method>";
- } else if (method == runtime->GetImtConflictMethod()) {
+ } else if (this == runtime->GetImtConflictMethod()) {
return "<runtime internal imt conflict method>";
- } else if (method == runtime->GetCalleeSaveMethod(Runtime::kSaveAll)) {
+ } else if (this == runtime->GetCalleeSaveMethod(Runtime::kSaveAll)) {
return "<runtime internal callee-save all registers method>";
- } else if (method == runtime->GetCalleeSaveMethod(Runtime::kRefsOnly)) {
+ } else if (this == runtime->GetCalleeSaveMethod(Runtime::kRefsOnly)) {
return "<runtime internal callee-save reference registers method>";
- } else if (method == runtime->GetCalleeSaveMethod(Runtime::kRefsAndArgs)) {
+ } else if (this == runtime->GetCalleeSaveMethod(Runtime::kRefsAndArgs)) {
return "<runtime internal callee-save reference and argument registers method>";
} else {
return "<unknown runtime internal method>";
@@ -370,92 +376,96 @@ inline const DexFile::CodeItem* ArtMethod::GetCodeItem() {
}
inline bool ArtMethod::IsResolvedTypeIdx(uint16_t type_idx) {
- mirror::ArtMethod* method = GetInterfaceMethodIfProxy();
- return method->GetDexCacheResolvedType(type_idx) != nullptr;
+ DCHECK(!IsProxyMethod());
+ return GetDexCacheResolvedType(type_idx) != nullptr;
}
inline int32_t ArtMethod::GetLineNumFromDexPC(uint32_t dex_pc) {
- mirror::ArtMethod* method = GetInterfaceMethodIfProxy();
+ DCHECK(!IsProxyMethod());
if (dex_pc == DexFile::kDexNoIndex) {
- return method->IsNative() ? -2 : -1;
+ return IsNative() ? -2 : -1;
}
- return method->GetDexFile()->GetLineNumFromPC(method, dex_pc);
+ return GetDexFile()->GetLineNumFromPC(this, dex_pc);
}
inline const DexFile::ProtoId& ArtMethod::GetPrototype() {
- mirror::ArtMethod* method = GetInterfaceMethodIfProxy();
- const DexFile* dex_file = method->GetDexFile();
- return dex_file->GetMethodPrototype(dex_file->GetMethodId(method->GetDexMethodIndex()));
+ DCHECK(!IsProxyMethod());
+ const DexFile* dex_file = GetDexFile();
+ return dex_file->GetMethodPrototype(dex_file->GetMethodId(GetDexMethodIndex()));
}
inline const DexFile::TypeList* ArtMethod::GetParameterTypeList() {
- mirror::ArtMethod* method = GetInterfaceMethodIfProxy();
- const DexFile* dex_file = method->GetDexFile();
+ DCHECK(!IsProxyMethod());
+ const DexFile* dex_file = GetDexFile();
const DexFile::ProtoId& proto = dex_file->GetMethodPrototype(
- dex_file->GetMethodId(method->GetDexMethodIndex()));
+ dex_file->GetMethodId(GetDexMethodIndex()));
return dex_file->GetProtoParameters(proto);
}
inline const char* ArtMethod::GetDeclaringClassSourceFile() {
- return GetInterfaceMethodIfProxy()->GetDeclaringClass()->GetSourceFile();
+ DCHECK(!IsProxyMethod());
+ return GetDeclaringClass()->GetSourceFile();
}
inline uint16_t ArtMethod::GetClassDefIndex() {
- return GetInterfaceMethodIfProxy()->GetDeclaringClass()->GetDexClassDefIndex();
+ DCHECK(!IsProxyMethod());
+ return GetDeclaringClass()->GetDexClassDefIndex();
}
inline const DexFile::ClassDef& ArtMethod::GetClassDef() {
- mirror::ArtMethod* method = GetInterfaceMethodIfProxy();
- return method->GetDexFile()->GetClassDef(GetClassDefIndex());
+ DCHECK(!IsProxyMethod());
+ return GetDexFile()->GetClassDef(GetClassDefIndex());
}
inline const char* ArtMethod::GetReturnTypeDescriptor() {
- mirror::ArtMethod* method = GetInterfaceMethodIfProxy();
- const DexFile* dex_file = method->GetDexFile();
- const DexFile::MethodId& method_id = dex_file->GetMethodId(method->GetDexMethodIndex());
+ DCHECK(!IsProxyMethod());
+ const DexFile* dex_file = GetDexFile();
+ const DexFile::MethodId& method_id = dex_file->GetMethodId(GetDexMethodIndex());
const DexFile::ProtoId& proto_id = dex_file->GetMethodPrototype(method_id);
uint16_t return_type_idx = proto_id.return_type_idx_;
return dex_file->GetTypeDescriptor(dex_file->GetTypeId(return_type_idx));
}
inline const char* ArtMethod::GetTypeDescriptorFromTypeIdx(uint16_t type_idx) {
- mirror::ArtMethod* method = GetInterfaceMethodIfProxy();
- const DexFile* dex_file = method->GetDexFile();
+ DCHECK(!IsProxyMethod());
+ const DexFile* dex_file = GetDexFile();
return dex_file->GetTypeDescriptor(dex_file->GetTypeId(type_idx));
}
inline mirror::ClassLoader* ArtMethod::GetClassLoader() {
- return GetInterfaceMethodIfProxy()->GetDeclaringClass()->GetClassLoader();
+ DCHECK(!IsProxyMethod());
+ return GetDeclaringClass()->GetClassLoader();
}
inline mirror::DexCache* ArtMethod::GetDexCache() {
- return GetInterfaceMethodIfProxy()->GetDeclaringClass()->GetDexCache();
+ DCHECK(!IsProxyMethod());
+ return GetDeclaringClass()->GetDexCache();
}
inline bool ArtMethod::IsProxyMethod() {
return GetDeclaringClass()->IsProxyClass();
}
-inline ArtMethod* ArtMethod::GetInterfaceMethodIfProxy() {
+inline ArtMethod* ArtMethod::GetInterfaceMethodIfProxy(size_t pointer_size) {
if (LIKELY(!IsProxyMethod())) {
return this;
}
mirror::Class* klass = GetDeclaringClass();
- mirror::ArtMethod* interface_method = GetDexCacheResolvedMethods()->Get(GetDexMethodIndex());
+ auto interface_method = GetDexCacheResolvedMethods()->GetElementPtrSize<ArtMethod*>(
+ GetDexMethodIndex(), pointer_size);
DCHECK(interface_method != nullptr);
DCHECK_EQ(interface_method,
Runtime::Current()->GetClassLinker()->FindMethodForProxy(klass, this));
return interface_method;
}
-inline void ArtMethod::SetDexCacheResolvedMethods(ObjectArray<ArtMethod>* new_dex_cache_methods) {
- SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_resolved_methods_),
- new_dex_cache_methods);
+inline void ArtMethod::SetDexCacheResolvedMethods(mirror::PointerArray* new_dex_cache_methods) {
+ dex_cache_resolved_methods_ = GcRoot<mirror::PointerArray>(new_dex_cache_methods);
}
-inline void ArtMethod::SetDexCacheResolvedTypes(ObjectArray<Class>* new_dex_cache_classes) {
- SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_resolved_types_),
- new_dex_cache_classes);
+inline void ArtMethod::SetDexCacheResolvedTypes(
+ mirror::ObjectArray<mirror::Class>* new_dex_cache_types) {
+ dex_cache_resolved_types_ = GcRoot<mirror::ObjectArray<mirror::Class>>(new_dex_cache_types);
}
inline mirror::Class* ArtMethod::GetReturnType(bool resolve) {
@@ -472,17 +482,23 @@ inline mirror::Class* ArtMethod::GetReturnType(bool resolve) {
return type;
}
-inline void ArtMethod::CheckObjectSizeEqualsMirrorSize() {
- // Using the default, check the class object size to make sure it matches the size of the
- // object.
- size_t this_size = sizeof(*this);
-#ifdef ART_METHOD_HAS_PADDING_FIELD_ON_64_BIT
- this_size += sizeof(void*) - sizeof(uint32_t);
-#endif
- DCHECK_EQ(GetClass()->GetObjectSize(), this_size);
+template<typename RootVisitorType>
+void ArtMethod::VisitRoots(RootVisitorType& visitor) {
+ visitor.VisitRootIfNonNull(declaring_class_.AddressWithoutBarrier());
+ visitor.VisitRootIfNonNull(dex_cache_resolved_methods_.AddressWithoutBarrier());
+ visitor.VisitRootIfNonNull(dex_cache_resolved_types_.AddressWithoutBarrier());
+}
+
+inline void ArtMethod::CopyFrom(const ArtMethod* src, size_t image_pointer_size) {
+ memcpy(reinterpret_cast<void*>(this), reinterpret_cast<const void*>(src),
+ ObjectSize(image_pointer_size));
+ declaring_class_ = GcRoot<mirror::Class>(const_cast<ArtMethod*>(src)->GetDeclaringClass());
+ dex_cache_resolved_methods_ = GcRoot<mirror::PointerArray>(
+ const_cast<ArtMethod*>(src)->GetDexCacheResolvedMethods());
+ dex_cache_resolved_types_ = GcRoot<mirror::ObjectArray<mirror::Class>>(
+ const_cast<ArtMethod*>(src)->GetDexCacheResolvedTypes());
}
-} // namespace mirror
} // namespace art
-#endif // ART_RUNTIME_MIRROR_ART_METHOD_INL_H_
+#endif // ART_RUNTIME_ART_METHOD_INL_H_
diff --git a/runtime/mirror/art_method.cc b/runtime/art_method.cc
index 9518c9d..fbaf0ae 100644
--- a/runtime/mirror/art_method.cc
+++ b/runtime/art_method.cc
@@ -16,12 +16,10 @@
#include "art_method.h"
-#include "abstract_method.h"
#include "arch/context.h"
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/stringpiece.h"
-#include "class-inl.h"
#include "dex_file-inl.h"
#include "dex_instruction.h"
#include "entrypoints/entrypoint_utils.h"
@@ -32,15 +30,15 @@
#include "jit/jit_code_cache.h"
#include "jni_internal.h"
#include "mapping_table.h"
-#include "object_array-inl.h"
-#include "object_array.h"
-#include "object-inl.h"
+#include "mirror/abstract_method.h"
+#include "mirror/class-inl.h"
+#include "mirror/object_array-inl.h"
+#include "mirror/object-inl.h"
+#include "mirror/string.h"
#include "scoped_thread_state_change.h"
-#include "string.h"
#include "well_known_classes.h"
namespace art {
-namespace mirror {
extern "C" void art_quick_invoke_stub(ArtMethod*, uint32_t*, uint32_t, Thread*, JValue*,
const char*);
@@ -49,9 +47,6 @@ extern "C" void art_quick_invoke_static_stub(ArtMethod*, uint32_t*, uint32_t, Th
const char*);
#endif
-// TODO: get global references for these
-GcRoot<Class> ArtMethod::java_lang_reflect_ArtMethod_;
-
ArtMethod* ArtMethod::FromReflectedMethod(const ScopedObjectAccessAlreadyRunnable& soa,
jobject jlr_method) {
auto* abstract_method = soa.Decode<mirror::AbstractMethod*>(jlr_method);
@@ -59,17 +54,13 @@ ArtMethod* ArtMethod::FromReflectedMethod(const ScopedObjectAccessAlreadyRunnabl
return abstract_method->GetArtMethod();
}
-void ArtMethod::VisitRoots(RootVisitor* visitor) {
- java_lang_reflect_ArtMethod_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
-}
-
mirror::String* ArtMethod::GetNameAsString(Thread* self) {
- mirror::ArtMethod* method = GetInterfaceMethodIfProxy();
- const DexFile* dex_file = method->GetDexFile();
- uint32_t dex_method_idx = method->GetDexMethodIndex();
- const DexFile::MethodId& method_id = dex_file->GetMethodId(dex_method_idx);
+ CHECK(!IsProxyMethod());
StackHandleScope<1> hs(self);
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(method->GetDexCache()));
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(GetDexCache()));
+ auto* dex_file = dex_cache->GetDexFile();
+ uint32_t dex_method_idx = GetDexMethodIndex();
+ const DexFile::MethodId& method_id = dex_file->GetMethodId(dex_method_idx);
return Runtime::Current()->GetClassLinker()->ResolveString(*dex_file, method_id.name_idx_,
dex_cache);
}
@@ -87,17 +78,6 @@ InvokeType ArtMethod::GetInvokeType() {
}
}
-void ArtMethod::SetClass(Class* java_lang_reflect_ArtMethod) {
- CHECK(java_lang_reflect_ArtMethod_.IsNull());
- CHECK(java_lang_reflect_ArtMethod != nullptr);
- java_lang_reflect_ArtMethod_ = GcRoot<Class>(java_lang_reflect_ArtMethod);
-}
-
-void ArtMethod::ResetClass() {
- CHECK(!java_lang_reflect_ArtMethod_.IsNull());
- java_lang_reflect_ArtMethod_ = GcRoot<Class>(nullptr);
-}
-
size_t ArtMethod::NumArgRegisters(const StringPiece& shorty) {
CHECK_LE(1U, shorty.length());
uint32_t num_registers = 0;
@@ -129,31 +109,33 @@ static bool HasSameNameAndSignature(ArtMethod* method1, ArtMethod* method2)
return dex_file->GetMethodSignature(mid) == dex_file2->GetMethodSignature(mid2);
}
-ArtMethod* ArtMethod::FindOverriddenMethod() {
+ArtMethod* ArtMethod::FindOverriddenMethod(size_t pointer_size) {
if (IsStatic()) {
return nullptr;
}
- Class* declaring_class = GetDeclaringClass();
- Class* super_class = declaring_class->GetSuperClass();
+ mirror::Class* declaring_class = GetDeclaringClass();
+ mirror::Class* super_class = declaring_class->GetSuperClass();
uint16_t method_index = GetMethodIndex();
ArtMethod* result = nullptr;
// Did this method override a super class method? If so load the result from the super class'
// vtable
if (super_class->HasVTable() && method_index < super_class->GetVTableLength()) {
- result = super_class->GetVTableEntry(method_index);
+ result = super_class->GetVTableEntry(method_index, pointer_size);
} else {
// Method didn't override superclass method so search interfaces
if (IsProxyMethod()) {
- result = GetDexCacheResolvedMethods()->Get(GetDexMethodIndex());
+ result = GetDexCacheResolvedMethods()->GetElementPtrSize<ArtMethod*>(
+ GetDexMethodIndex(), pointer_size);
CHECK_EQ(result,
Runtime::Current()->GetClassLinker()->FindMethodForProxy(GetDeclaringClass(), this));
} else {
- IfTable* iftable = GetDeclaringClass()->GetIfTable();
+ mirror::IfTable* iftable = GetDeclaringClass()->GetIfTable();
for (size_t i = 0; i < iftable->Count() && result == nullptr; i++) {
- Class* interface = iftable->GetInterface(i);
+ mirror::Class* interface = iftable->GetInterface(i);
for (size_t j = 0; j < interface->NumVirtualMethods(); ++j) {
- mirror::ArtMethod* interface_method = interface->GetVirtualMethod(j);
- if (HasSameNameAndSignature(this, interface_method)) {
+ ArtMethod* interface_method = interface->GetVirtualMethod(j, pointer_size);
+ if (HasSameNameAndSignature(
+ this, interface_method->GetInterfaceMethodIfProxy(sizeof(void*)))) {
result = interface_method;
break;
}
@@ -161,9 +143,8 @@ ArtMethod* ArtMethod::FindOverriddenMethod() {
}
}
}
- if (kIsDebugBuild) {
- DCHECK(result == nullptr || HasSameNameAndSignature(this, result));
- }
+ DCHECK(result == nullptr || HasSameNameAndSignature(
+ GetInterfaceMethodIfProxy(sizeof(void*)), result->GetInterfaceMethodIfProxy(sizeof(void*))));
return result;
}
@@ -264,9 +245,9 @@ uintptr_t ArtMethod::ToNativeQuickPc(const uint32_t dex_pc, bool abort_on_failur
return UINTPTR_MAX;
}
-uint32_t ArtMethod::FindCatchBlock(Handle<ArtMethod> h_this, Handle<Class> exception_type,
+uint32_t ArtMethod::FindCatchBlock(Handle<mirror::Class> exception_type,
uint32_t dex_pc, bool* has_no_move_exception) {
- const DexFile::CodeItem* code_item = h_this->GetCodeItem();
+ const DexFile::CodeItem* code_item = GetCodeItem();
// Set aside the exception while we resolve its type.
Thread* self = Thread::Current();
StackHandleScope<1> hs(self);
@@ -283,7 +264,7 @@ uint32_t ArtMethod::FindCatchBlock(Handle<ArtMethod> h_this, Handle<Class> excep
break;
}
// Does this catch exception type apply?
- Class* iter_exception_type = h_this->GetClassFromTypeIndex(iter_type_idx, true);
+ mirror::Class* iter_exception_type = GetClassFromTypeIndex(iter_type_idx, true);
if (UNLIKELY(iter_exception_type == nullptr)) {
// Now have a NoClassDefFoundError as exception. Ignore in case the exception class was
// removed by a pro-guard like tool.
@@ -293,7 +274,7 @@ uint32_t ArtMethod::FindCatchBlock(Handle<ArtMethod> h_this, Handle<Class> excep
// release its in use context at the end.
delete self->GetLongJumpContext();
LOG(WARNING) << "Unresolved exception class when finding catch block: "
- << DescriptorToDot(h_this->GetTypeDescriptorFromTypeIdx(iter_type_idx));
+ << DescriptorToDot(GetTypeDescriptorFromTypeIdx(iter_type_idx));
} else if (iter_exception_type->IsAssignableFrom(exception_type.Get())) {
found_dex_pc = it.GetHandlerAddress();
break;
@@ -375,7 +356,8 @@ const void* ArtMethod::GetQuickOatEntryPoint(size_t pointer_size) {
#ifndef NDEBUG
uintptr_t ArtMethod::NativeQuickPcOffset(const uintptr_t pc, const void* quick_entry_point) {
CHECK_NE(quick_entry_point, GetQuickToInterpreterBridge());
- CHECK_EQ(quick_entry_point, Runtime::Current()->GetInstrumentation()->GetQuickCodeFor(this, sizeof(void*)));
+ CHECK_EQ(quick_entry_point,
+ Runtime::Current()->GetInstrumentation()->GetQuickCodeFor(this, sizeof(void*)));
return pc - reinterpret_cast<uintptr_t>(quick_entry_point);
}
#endif
@@ -390,7 +372,7 @@ void ArtMethod::Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue*
if (kIsDebugBuild) {
self->AssertThreadSuspensionIsAllowable();
CHECK_EQ(kRunnable, self->GetState());
- CHECK_STREQ(GetShorty(), shorty);
+ CHECK_STREQ(GetInterfaceMethodIfProxy(sizeof(void*))->GetShorty(), shorty);
}
// Push a transition back into managed code onto the linked list in thread.
@@ -405,16 +387,20 @@ void ArtMethod::Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue*
if (IsStatic()) {
art::interpreter::EnterInterpreterFromInvoke(self, this, nullptr, args, result);
} else {
- Object* receiver = reinterpret_cast<StackReference<Object>*>(&args[0])->AsMirrorPtr();
+ mirror::Object* receiver =
+ reinterpret_cast<StackReference<mirror::Object>*>(&args[0])->AsMirrorPtr();
art::interpreter::EnterInterpreterFromInvoke(self, this, receiver, args + 1, result);
}
} else {
- const bool kLogInvocationStartAndReturn = false;
+ DCHECK_EQ(runtime->GetClassLinker()->GetImagePointerSize(), sizeof(void*));
+
+ constexpr bool kLogInvocationStartAndReturn = false;
bool have_quick_code = GetEntryPointFromQuickCompiledCode() != nullptr;
if (LIKELY(have_quick_code)) {
if (kLogInvocationStartAndReturn) {
- LOG(INFO) << StringPrintf("Invoking '%s' quick code=%p", PrettyMethod(this).c_str(),
- GetEntryPointFromQuickCompiledCode());
+ LOG(INFO) << StringPrintf(
+ "Invoking '%s' quick code=%p static=%d", PrettyMethod(this).c_str(),
+ GetEntryPointFromQuickCompiledCode(), static_cast<int>(IsStatic() ? 1 : 0));
}
// Ensure that we won't be accidentally calling quick compiled code when -Xint.
@@ -481,6 +467,11 @@ QuickMethodFrameInfo ArtMethod::GetQuickFrameInfo() {
return runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
}
+ // This goes before IsProxyMethod since runtime methods have a null declaring class.
+ if (UNLIKELY(IsRuntimeMethod())) {
+ return runtime->GetRuntimeMethodFrameInfo(this);
+ }
+
// For Proxy method we add special handling for the direct method case (there is only one
// direct method - constructor). Direct method is cloned from original
// java.lang.reflect.Proxy class together with code and as a result it is executed as usual
@@ -497,10 +488,6 @@ QuickMethodFrameInfo ArtMethod::GetQuickFrameInfo() {
}
}
- if (UNLIKELY(IsRuntimeMethod())) {
- return runtime->GetRuntimeMethodFrameInfo(this);
- }
-
const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(this, sizeof(void*));
ClassLinker* class_linker = runtime->GetClassLinker();
// On failure, instead of null we get the quick-generic-jni-trampoline for native method
@@ -516,11 +503,9 @@ QuickMethodFrameInfo ArtMethod::GetQuickFrameInfo() {
QuickMethodFrameInfo callee_info = runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
// Callee saves + handle scope + method ref + alignment
- size_t frame_size = RoundUp(callee_info.FrameSizeInBytes() + scope_size
- - sizeof(void*) // callee-save frame stores a whole method pointer
- + sizeof(StackReference<mirror::ArtMethod>),
- kStackAlignment);
-
+ // Note: -sizeof(void*) since callee-save frame stores a whole method pointer.
+ size_t frame_size = RoundUp(callee_info.FrameSizeInBytes() - sizeof(void*) +
+ sizeof(ArtMethod*) + scope_size, kStackAlignment);
return QuickMethodFrameInfo(frame_size, callee_info.CoreSpillMask(), callee_info.FpSpillMask());
}
@@ -570,5 +555,4 @@ bool ArtMethod::EqualParameters(Handle<mirror::ObjectArray<mirror::Class>> param
return true;
}
-} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/art_method.h b/runtime/art_method.h
index 0da5925..4a1e2c4 100644
--- a/runtime/mirror/art_method.h
+++ b/runtime/art_method.h
@@ -14,50 +14,61 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_MIRROR_ART_METHOD_H_
-#define ART_RUNTIME_MIRROR_ART_METHOD_H_
+#ifndef ART_RUNTIME_ART_METHOD_H_
+#define ART_RUNTIME_ART_METHOD_H_
#include "dex_file.h"
#include "gc_root.h"
#include "invoke_type.h"
#include "method_reference.h"
#include "modifiers.h"
-#include "object.h"
+#include "mirror/object.h"
#include "object_callbacks.h"
#include "quick/quick_method_frame_info.h"
#include "read_barrier_option.h"
#include "stack.h"
#include "stack_map.h"
+#include "utils.h"
namespace art {
-struct ArtMethodOffsets;
-struct ConstructorMethodOffsets;
union JValue;
class ScopedObjectAccessAlreadyRunnable;
class StringPiece;
class ShadowFrame;
namespace mirror {
+class Array;
+class Class;
+class PointerArray;
+} // namespace mirror
typedef void (EntryPointFromInterpreter)(Thread* self, const DexFile::CodeItem* code_item,
ShadowFrame* shadow_frame, JValue* result);
-#define ART_METHOD_HAS_PADDING_FIELD_ON_64_BIT
-
-// C++ mirror of java.lang.reflect.ArtMethod.
-class MANAGED ArtMethod FINAL : public Object {
+class ArtMethod FINAL {
public:
- // Size of java.lang.reflect.ArtMethod.class.
- static uint32_t ClassSize();
+ ArtMethod() : access_flags_(0), dex_code_item_offset_(0), dex_method_index_(0),
+ method_index_(0) { }
+
+ ArtMethod(const ArtMethod& src, size_t image_pointer_size) {
+ CopyFrom(&src, image_pointer_size);
+ }
static ArtMethod* FromReflectedMethod(const ScopedObjectAccessAlreadyRunnable& soa,
jobject jlr_method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- Class* GetDeclaringClass() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE mirror::Class* GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SetDeclaringClass(Class *new_declaring_class) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE mirror::Class* GetDeclaringClassNoBarrier()
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ ALWAYS_INLINE mirror::Class* GetDeclaringClassUnchecked()
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ void SetDeclaringClass(mirror::Class *new_declaring_class)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static MemberOffset DeclaringClassOffset() {
return MemberOffset(OFFSETOF_MEMBER(ArtMethod, declaring_class_));
@@ -65,9 +76,9 @@ class MANAGED ArtMethod FINAL : public Object {
ALWAYS_INLINE uint32_t GetAccessFlags() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SetAccessFlags(uint32_t new_access_flags) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetAccessFlags(uint32_t new_access_flags) {
// Not called within a transaction.
- SetField32<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, access_flags_), new_access_flags);
+ access_flags_ = new_access_flags;
}
// Approximate what kind of method call would be used for this method.
@@ -180,7 +191,7 @@ class MANAGED ArtMethod FINAL : public Object {
void SetMethodIndex(uint16_t new_method_index) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Not called within a transaction.
- SetField32<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, method_index_), new_method_index);
+ method_index_ = new_method_index;
}
static MemberOffset DexMethodIndexOffset() {
@@ -191,13 +202,13 @@ class MANAGED ArtMethod FINAL : public Object {
return OFFSET_OF_OBJECT_MEMBER(ArtMethod, method_index_);
}
- uint32_t GetCodeItemOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_code_item_offset_));
+ uint32_t GetCodeItemOffset() {
+ return dex_code_item_offset_;
}
- void SetCodeItemOffset(uint32_t new_code_off) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetCodeItemOffset(uint32_t new_code_off) {
// Not called within a transaction.
- SetField32<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_code_item_offset_), new_code_off);
+ dex_code_item_offset_ = new_code_off;
}
// Number of 32bit registers that would be required to hold all the arguments
@@ -205,9 +216,9 @@ class MANAGED ArtMethod FINAL : public Object {
ALWAYS_INLINE uint32_t GetDexMethodIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SetDexMethodIndex(uint32_t new_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetDexMethodIndex(uint32_t new_idx) {
// Not called within a transaction.
- SetField32<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_method_index_), new_idx);
+ dex_method_index_ = new_idx;
}
static MemberOffset DexCacheResolvedMethodsOffset() {
@@ -218,26 +229,29 @@ class MANAGED ArtMethod FINAL : public Object {
return OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_resolved_types_);
}
- ALWAYS_INLINE ObjectArray<ArtMethod>* GetDexCacheResolvedMethods()
+ ALWAYS_INLINE mirror::PointerArray* GetDexCacheResolvedMethods()
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ALWAYS_INLINE ArtMethod* GetDexCacheResolvedMethod(uint16_t method_idx)
+ ALWAYS_INLINE ArtMethod* GetDexCacheResolvedMethod(uint16_t method_idx, size_t ptr_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ALWAYS_INLINE void SetDexCacheResolvedMethod(uint16_t method_idx, ArtMethod* new_method)
+ ALWAYS_INLINE void SetDexCacheResolvedMethod(uint16_t method_idx, ArtMethod* new_method,
+ size_t ptr_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ALWAYS_INLINE void SetDexCacheResolvedMethods(ObjectArray<ArtMethod>* new_dex_cache_methods)
+ ALWAYS_INLINE void SetDexCacheResolvedMethods(mirror::PointerArray* new_dex_cache_methods)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool HasDexCacheResolvedMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool HasSameDexCacheResolvedMethods(ArtMethod* other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool HasSameDexCacheResolvedMethods(ObjectArray<ArtMethod>* other_cache)
+ bool HasSameDexCacheResolvedMethods(ArtMethod* other)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool HasSameDexCacheResolvedMethods(mirror::PointerArray* other_cache)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template <bool kWithCheck = true>
- Class* GetDexCacheResolvedType(uint32_t type_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SetDexCacheResolvedTypes(ObjectArray<Class>* new_dex_cache_types)
+ mirror::Class* GetDexCacheResolvedType(uint32_t type_idx)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetDexCacheResolvedTypes(mirror::ObjectArray<mirror::Class>* new_dex_cache_types)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool HasDexCacheResolvedTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool HasSameDexCacheResolvedTypes(ArtMethod* other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool HasSameDexCacheResolvedTypes(ObjectArray<Class>* other_cache)
+ bool HasSameDexCacheResolvedTypes(mirror::ObjectArray<mirror::Class>* other_cache)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Get the Class* from the type index into this method's dex cache.
@@ -245,7 +259,7 @@ class MANAGED ArtMethod FINAL : public Object {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Find the method that this method overrides.
- ArtMethod* FindOverriddenMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtMethod* FindOverriddenMethod(size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Find the method index for this method within other_dexfile. If this method isn't present then
// return DexFile::kDexNoIndex. The name_and_signature_idx MUST refer to a MethodId with the same
@@ -258,59 +272,39 @@ class MANAGED ArtMethod FINAL : public Object {
void Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue* result, const char* shorty)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- EntryPointFromInterpreter* GetEntryPointFromInterpreter()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- CheckObjectSizeEqualsMirrorSize();
+ EntryPointFromInterpreter* GetEntryPointFromInterpreter() {
return GetEntryPointFromInterpreterPtrSize(sizeof(void*));
}
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- EntryPointFromInterpreter* GetEntryPointFromInterpreterPtrSize(size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetFieldPtrWithSize<EntryPointFromInterpreter*, kVerifyFlags>(
+ EntryPointFromInterpreter* GetEntryPointFromInterpreterPtrSize(size_t pointer_size) {
+ return GetEntryPoint<EntryPointFromInterpreter*>(
EntryPointFromInterpreterOffset(pointer_size), pointer_size);
}
- template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- void SetEntryPointFromInterpreter(EntryPointFromInterpreter* entry_point_from_interpreter)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- CheckObjectSizeEqualsMirrorSize();
+ void SetEntryPointFromInterpreter(EntryPointFromInterpreter* entry_point_from_interpreter) {
SetEntryPointFromInterpreterPtrSize(entry_point_from_interpreter, sizeof(void*));
}
- template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
void SetEntryPointFromInterpreterPtrSize(EntryPointFromInterpreter* entry_point_from_interpreter,
- size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- SetFieldPtrWithSize<false, true, kVerifyFlags>(
- EntryPointFromInterpreterOffset(pointer_size), entry_point_from_interpreter, pointer_size);
+ size_t pointer_size) {
+ SetEntryPoint(EntryPointFromInterpreterOffset(pointer_size), entry_point_from_interpreter,
+ pointer_size);
}
- template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- const void* GetEntryPointFromQuickCompiledCode() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- CheckObjectSizeEqualsMirrorSize();
+ const void* GetEntryPointFromQuickCompiledCode() {
return GetEntryPointFromQuickCompiledCodePtrSize(sizeof(void*));
}
- template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE const void* GetEntryPointFromQuickCompiledCodePtrSize(size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetFieldPtrWithSize<const void*, kVerifyFlags>(
+ ALWAYS_INLINE const void* GetEntryPointFromQuickCompiledCodePtrSize(size_t pointer_size) {
+ return GetEntryPoint<const void*>(
EntryPointFromQuickCompiledCodeOffset(pointer_size), pointer_size);
}
- template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- void SetEntryPointFromQuickCompiledCode(const void* entry_point_from_quick_compiled_code)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- CheckObjectSizeEqualsMirrorSize();
+ void SetEntryPointFromQuickCompiledCode(const void* entry_point_from_quick_compiled_code) {
SetEntryPointFromQuickCompiledCodePtrSize(entry_point_from_quick_compiled_code,
sizeof(void*));
}
- template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE void SetEntryPointFromQuickCompiledCodePtrSize(
- const void* entry_point_from_quick_compiled_code, size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- SetFieldPtrWithSize<false, true, kVerifyFlags>(
- EntryPointFromQuickCompiledCodeOffset(pointer_size), entry_point_from_quick_compiled_code,
- pointer_size);
+ const void* entry_point_from_quick_compiled_code, size_t pointer_size) {
+ SetEntryPoint(EntryPointFromQuickCompiledCodeOffset(pointer_size),
+ entry_point_from_quick_compiled_code, pointer_size);
}
uint32_t GetCodeSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -318,7 +312,7 @@ class MANAGED ArtMethod FINAL : public Object {
// Check whether the given PC is within the quick compiled code associated with this method's
// quick entrypoint. This code isn't robust for instrumentation, etc. and is only used for
// debug purposes.
- bool PcIsWithinQuickCode(uintptr_t pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool PcIsWithinQuickCode(uintptr_t pc) {
return PcIsWithinQuickCode(
reinterpret_cast<uintptr_t>(GetEntryPointFromQuickCompiledCode()), pc);
}
@@ -330,8 +324,8 @@ class MANAGED ArtMethod FINAL : public Object {
// interpretered on invocation.
bool IsEntrypointInterpreter() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- uint32_t GetQuickOatCodeOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SetQuickOatCodeOffset(uint32_t code_offset) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ uint32_t GetQuickOatCodeOffset();
+ void SetQuickOatCodeOffset(uint32_t code_offset);
ALWAYS_INLINE static const void* EntryPointToCodePointer(const void* entry_point) {
uintptr_t code = reinterpret_cast<uintptr_t>(entry_point);
@@ -394,7 +388,7 @@ class MANAGED ArtMethod FINAL : public Object {
}
FrameOffset GetHandleScopeOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- constexpr size_t handle_scope_offset = sizeof(StackReference<mirror::ArtMethod>);
+ constexpr size_t handle_scope_offset = sizeof(ArtMethod*);
DCHECK_LT(handle_scope_offset, GetFrameSizeInBytes());
return FrameOffset(handle_scope_offset);
}
@@ -419,30 +413,23 @@ class MANAGED ArtMethod FINAL : public Object {
PtrSizedFields, entry_point_from_quick_compiled_code_) / sizeof(void*) * pointer_size);
}
- void* GetEntryPointFromJni() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- CheckObjectSizeEqualsMirrorSize();
+ void* GetEntryPointFromJni() {
return GetEntryPointFromJniPtrSize(sizeof(void*));
}
- ALWAYS_INLINE void* GetEntryPointFromJniPtrSize(size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetFieldPtrWithSize<void*>(EntryPointFromJniOffset(pointer_size), pointer_size);
+ ALWAYS_INLINE void* GetEntryPointFromJniPtrSize(size_t pointer_size) {
+ return GetEntryPoint<void*>(EntryPointFromJniOffset(pointer_size), pointer_size);
}
- template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
void SetEntryPointFromJni(const void* entrypoint) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- CheckObjectSizeEqualsMirrorSize();
- SetEntryPointFromJniPtrSize<kVerifyFlags>(entrypoint, sizeof(void*));
+ SetEntryPointFromJniPtrSize(entrypoint, sizeof(void*));
}
- template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE void SetEntryPointFromJniPtrSize(const void* entrypoint, size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- SetFieldPtrWithSize<false, true, kVerifyFlags>(
- EntryPointFromJniOffset(pointer_size), entrypoint, pointer_size);
+ ALWAYS_INLINE void SetEntryPointFromJniPtrSize(const void* entrypoint, size_t pointer_size) {
+ SetEntryPoint(EntryPointFromJniOffset(pointer_size), entrypoint, pointer_size);
}
// Is this a CalleSaveMethod or ResolutionMethod and therefore doesn't adhere to normal
// conventions for a method of managed code. Returns false for Proxy methods.
- bool IsRuntimeMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE bool IsRuntimeMethod();
// Is this a hand crafted method used for something like describing callee saves?
bool IsCalleeSaveMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -479,19 +466,12 @@ class MANAGED ArtMethod FINAL : public Object {
// Find the catch block for the given exception type and dex_pc. When a catch block is found,
// indicates whether the found catch block is responsible for clearing the exception or whether
// a move-exception instruction is present.
- static uint32_t FindCatchBlock(Handle<ArtMethod> h_this, Handle<Class> exception_type,
- uint32_t dex_pc, bool* has_no_move_exception)
+ uint32_t FindCatchBlock(Handle<mirror::Class> exception_type, uint32_t dex_pc,
+ bool* has_no_move_exception)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static void SetClass(Class* java_lang_reflect_ArtMethod);
-
- template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- static Class* GetJavaLangReflectArtMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- static void ResetClass();
-
- static void VisitRoots(RootVisitor* visitor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ template<typename RootVisitorType>
+ void VisitRoots(RootVisitorType& visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
const DexFile* GetDexFile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -539,37 +519,35 @@ class MANAGED ArtMethod FINAL : public Object {
mirror::DexCache* GetDexCache() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ALWAYS_INLINE ArtMethod* GetInterfaceMethodIfProxy() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE ArtMethod* GetInterfaceMethodIfProxy(size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// May cause thread suspension due to class resolution.
bool EqualParameters(Handle<mirror::ObjectArray<mirror::Class>> params)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static size_t SizeWithoutPointerFields(size_t pointer_size) {
- size_t total = sizeof(ArtMethod) - sizeof(PtrSizedFields);
-#ifdef ART_METHOD_HAS_PADDING_FIELD_ON_64_BIT
- // Add 4 bytes if 64 bit, otherwise 0.
- total += pointer_size - sizeof(uint32_t);
-#endif
- return total;
- }
-
- // Size of an instance of java.lang.reflect.ArtMethod not including its value array.
- static size_t InstanceSize(size_t pointer_size) {
- return SizeWithoutPointerFields(pointer_size) +
+ // Size of an instance of this object.
+ static size_t ObjectSize(size_t pointer_size) {
+ return RoundUp(OFFSETOF_MEMBER(ArtMethod, ptr_sized_fields_), pointer_size) +
(sizeof(PtrSizedFields) / sizeof(void*)) * pointer_size;
}
+ void CopyFrom(const ArtMethod* src, size_t image_pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ ALWAYS_INLINE mirror::ObjectArray<mirror::Class>* GetDexCacheResolvedTypes()
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
protected:
// Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
// The class we are a part of.
- HeapReference<Class> declaring_class_;
+ GcRoot<mirror::Class> declaring_class_;
// Short cuts to declaring_class_->dex_cache_ member for fast compiled code access.
- HeapReference<ObjectArray<ArtMethod>> dex_cache_resolved_methods_;
+ GcRoot<mirror::PointerArray> dex_cache_resolved_methods_;
// Short cuts to declaring_class_->dex_cache_ member for fast compiled code access.
- HeapReference<ObjectArray<Class>> dex_cache_resolved_types_;
+ GcRoot<mirror::ObjectArray<mirror::Class>> dex_cache_resolved_types_;
// Access flags; low 16 bits are defined by spec.
uint32_t access_flags_;
@@ -592,6 +570,8 @@ class MANAGED ArtMethod FINAL : public Object {
// Fake padding field gets inserted here.
// Must be the last fields in the method.
+ // PACKED(4) is necessary for the correctness of
+ // RoundUp(OFFSETOF_MEMBER(ArtMethod, ptr_sized_fields_), pointer_size).
struct PACKED(4) PtrSizedFields {
// Method dispatch from the interpreter invokes this pointer which may cause a bridge into
// compiled code.
@@ -605,21 +585,36 @@ class MANAGED ArtMethod FINAL : public Object {
void* entry_point_from_quick_compiled_code_;
} ptr_sized_fields_;
- static GcRoot<Class> java_lang_reflect_ArtMethod_;
-
private:
- ALWAYS_INLINE void CheckObjectSizeEqualsMirrorSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- ALWAYS_INLINE ObjectArray<Class>* GetDexCacheResolvedTypes()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
static size_t PtrSizedFieldsOffset(size_t pointer_size) {
- size_t offset = OFFSETOF_MEMBER(ArtMethod, ptr_sized_fields_);
-#ifdef ART_METHOD_HAS_PADDING_FIELD_ON_64_BIT
- // Add 4 bytes if 64 bit, otherwise 0.
- offset += pointer_size - sizeof(uint32_t);
-#endif
- return offset;
+ // Round up to pointer size for padding field.
+ return RoundUp(OFFSETOF_MEMBER(ArtMethod, ptr_sized_fields_), pointer_size);
+ }
+
+ template<typename T>
+ ALWAYS_INLINE T GetEntryPoint(MemberOffset offset, size_t pointer_size) const {
+ DCHECK(ValidPointerSize(pointer_size)) << pointer_size;
+ const auto addr = reinterpret_cast<uintptr_t>(this) + offset.Uint32Value();
+ if (pointer_size == sizeof(uint32_t)) {
+ return reinterpret_cast<T>(*reinterpret_cast<const uint32_t*>(addr));
+ } else {
+ auto v = *reinterpret_cast<const uint64_t*>(addr);
+ DCHECK_EQ(reinterpret_cast<uint64_t>(reinterpret_cast<T>(v)), v) << "Conversion lost bits";
+ return reinterpret_cast<T>(v);
+ }
+ }
+
+ template<typename T>
+ ALWAYS_INLINE void SetEntryPoint(MemberOffset offset, T new_value, size_t pointer_size) {
+ DCHECK(ValidPointerSize(pointer_size)) << pointer_size;
+ const auto addr = reinterpret_cast<uintptr_t>(this) + offset.Uint32Value();
+ if (pointer_size == sizeof(uint32_t)) {
+ uintptr_t ptr = reinterpret_cast<uintptr_t>(new_value);
+ DCHECK_EQ(static_cast<uint32_t>(ptr), ptr) << "Conversion lost bits";
+ *reinterpret_cast<uint32_t*>(addr) = static_cast<uint32_t>(ptr);
+ } else {
+ *reinterpret_cast<uint64_t*>(addr) = reinterpret_cast<uintptr_t>(new_value);
+ }
}
// Code points to the start of the quick code.
@@ -640,11 +635,9 @@ class MANAGED ArtMethod FINAL : public Object {
EntryPointToCodePointer(reinterpret_cast<const void*>(code)));
}
- friend struct art::ArtMethodOffsets; // for verifying offset information
- DISALLOW_IMPLICIT_CONSTRUCTORS(ArtMethod);
+ DISALLOW_COPY_AND_ASSIGN(ArtMethod); // Need to use CopyFrom to deal with 32 vs 64 bits.
};
-} // namespace mirror
} // namespace art
-#endif // ART_RUNTIME_MIRROR_ART_METHOD_H_
+#endif // ART_RUNTIME_ART_METHOD_H_
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index 3e677a4..d7efe1c 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -18,8 +18,8 @@
#define ART_RUNTIME_ASM_SUPPORT_H_
#if defined(__cplusplus)
+#include "art_method.h"
#include "lock_word.h"
-#include "mirror/art_method.h"
#include "mirror/class.h"
#include "mirror/string.h"
#include "runtime.h"
@@ -69,12 +69,12 @@ ADD_TEST_EQ(static_cast<size_t>(RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET),
art::Runtime::GetCalleeSaveMethodOffset(art::Runtime::kSaveAll))
// Offset of field Runtime::callee_save_methods_[kRefsOnly]
-#define RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET COMPRESSED_REFERENCE_SIZE
+#define RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET 8
ADD_TEST_EQ(static_cast<size_t>(RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET),
art::Runtime::GetCalleeSaveMethodOffset(art::Runtime::kRefsOnly))
// Offset of field Runtime::callee_save_methods_[kRefsAndArgs]
-#define RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET (2 * COMPRESSED_REFERENCE_SIZE)
+#define RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET (2 * 8)
ADD_TEST_EQ(static_cast<size_t>(RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET),
art::Runtime::GetCalleeSaveMethodOffset(art::Runtime::kRefsAndArgs))
@@ -135,13 +135,13 @@ ADD_TEST_EQ(size_t(MIRROR_OBJECT_HEADER_SIZE), sizeof(art::mirror::Object))
#define MIRROR_CLASS_COMPONENT_TYPE_OFFSET (4 + MIRROR_OBJECT_HEADER_SIZE)
ADD_TEST_EQ(MIRROR_CLASS_COMPONENT_TYPE_OFFSET,
art::mirror::Class::ComponentTypeOffset().Int32Value())
-#define MIRROR_CLASS_ACCESS_FLAGS_OFFSET (44 + MIRROR_OBJECT_HEADER_SIZE)
+#define MIRROR_CLASS_ACCESS_FLAGS_OFFSET (36 + MIRROR_OBJECT_HEADER_SIZE)
ADD_TEST_EQ(MIRROR_CLASS_ACCESS_FLAGS_OFFSET,
art::mirror::Class::AccessFlagsOffset().Int32Value())
-#define MIRROR_CLASS_OBJECT_SIZE_OFFSET (96 + MIRROR_OBJECT_HEADER_SIZE)
+#define MIRROR_CLASS_OBJECT_SIZE_OFFSET (112 + MIRROR_OBJECT_HEADER_SIZE)
ADD_TEST_EQ(MIRROR_CLASS_OBJECT_SIZE_OFFSET,
art::mirror::Class::ObjectSizeOffset().Int32Value())
-#define MIRROR_CLASS_STATUS_OFFSET (108 + MIRROR_OBJECT_HEADER_SIZE)
+#define MIRROR_CLASS_STATUS_OFFSET (124 + MIRROR_OBJECT_HEADER_SIZE)
ADD_TEST_EQ(MIRROR_CLASS_STATUS_OFFSET,
art::mirror::Class::StatusOffset().Int32Value())
@@ -169,6 +169,10 @@ ADD_TEST_EQ(MIRROR_OBJECT_ARRAY_DATA_OFFSET,
ADD_TEST_EQ(static_cast<size_t>(MIRROR_OBJECT_ARRAY_COMPONENT_SIZE),
sizeof(art::mirror::HeapReference<art::mirror::Object>))
+#define MIRROR_LONG_ARRAY_DATA_OFFSET (8 + MIRROR_OBJECT_HEADER_SIZE)
+ADD_TEST_EQ(MIRROR_LONG_ARRAY_DATA_OFFSET,
+ art::mirror::Array::DataOffset(sizeof(uint64_t)).Int32Value())
+
// Offsets within java.lang.String.
#define MIRROR_STRING_COUNT_OFFSET MIRROR_OBJECT_HEADER_SIZE
ADD_TEST_EQ(MIRROR_STRING_COUNT_OFFSET, art::mirror::String::CountOffset().Int32Value())
@@ -177,21 +181,21 @@ ADD_TEST_EQ(MIRROR_STRING_COUNT_OFFSET, art::mirror::String::CountOffset().Int32
ADD_TEST_EQ(MIRROR_STRING_VALUE_OFFSET, art::mirror::String::ValueOffset().Int32Value())
// Offsets within java.lang.reflect.ArtMethod.
-#define MIRROR_ART_METHOD_DEX_CACHE_METHODS_OFFSET (4 + MIRROR_OBJECT_HEADER_SIZE)
-ADD_TEST_EQ(MIRROR_ART_METHOD_DEX_CACHE_METHODS_OFFSET,
- art::mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value())
+#define ART_METHOD_DEX_CACHE_METHODS_OFFSET 4
+ADD_TEST_EQ(ART_METHOD_DEX_CACHE_METHODS_OFFSET,
+ art::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value())
-#define MIRROR_ART_METHOD_DEX_CACHE_TYPES_OFFSET (8 + MIRROR_OBJECT_HEADER_SIZE)
-ADD_TEST_EQ(MIRROR_ART_METHOD_DEX_CACHE_TYPES_OFFSET,
- art::mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value())
+#define ART_METHOD_DEX_CACHE_TYPES_OFFSET 8
+ADD_TEST_EQ(ART_METHOD_DEX_CACHE_TYPES_OFFSET,
+ art::ArtMethod::DexCacheResolvedTypesOffset().Int32Value())
-#define MIRROR_ART_METHOD_QUICK_CODE_OFFSET_32 (36 + MIRROR_OBJECT_HEADER_SIZE)
-ADD_TEST_EQ(MIRROR_ART_METHOD_QUICK_CODE_OFFSET_32,
- art::mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(4).Int32Value())
+#define ART_METHOD_QUICK_CODE_OFFSET_32 36
+ADD_TEST_EQ(ART_METHOD_QUICK_CODE_OFFSET_32,
+ art::ArtMethod::EntryPointFromQuickCompiledCodeOffset(4).Int32Value())
-#define MIRROR_ART_METHOD_QUICK_CODE_OFFSET_64 (48 + MIRROR_OBJECT_HEADER_SIZE)
-ADD_TEST_EQ(MIRROR_ART_METHOD_QUICK_CODE_OFFSET_64,
- art::mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(8).Int32Value())
+#define ART_METHOD_QUICK_CODE_OFFSET_64 48
+ADD_TEST_EQ(ART_METHOD_QUICK_CODE_OFFSET_64,
+ art::ArtMethod::EntryPointFromQuickCompiledCodeOffset(8).Int32Value())
#define LOCK_WORD_STATE_SHIFT 30
ADD_TEST_EQ(LOCK_WORD_STATE_SHIFT, static_cast<int32_t>(art::LockWord::kStateShift))
diff --git a/runtime/base/arena_allocator.cc b/runtime/base/arena_allocator.cc
index b53fa84..8f2d94b 100644
--- a/runtime/base/arena_allocator.cc
+++ b/runtime/base/arena_allocator.cc
@@ -302,6 +302,18 @@ void ArenaAllocator::ObtainNewArenaForAllocation(size_t allocation_size) {
end_ = new_arena->End();
}
+bool ArenaAllocator::Contains(const void* ptr) const {
+ if (ptr >= begin_ && ptr < end_) {
+ return true;
+ }
+ for (const Arena* cur_arena = arena_head_; cur_arena != nullptr; cur_arena = cur_arena->next_) {
+ if (cur_arena->Contains(ptr)) {
+ return true;
+ }
+ }
+ return false;
+}
+
MemStats::MemStats(const char* name, const ArenaAllocatorStats* stats, const Arena* first_arena,
ssize_t lost_bytes_adjustment)
: name_(name),
diff --git a/runtime/base/arena_allocator.h b/runtime/base/arena_allocator.h
index 2e617b5..d9723b5 100644
--- a/runtime/base/arena_allocator.h
+++ b/runtime/base/arena_allocator.h
@@ -142,6 +142,11 @@ class Arena {
return bytes_allocated_;
}
+ // Return true if ptr is contained in the arena.
+ bool Contains(const void* ptr) const {
+ return memory_ <= ptr && ptr < memory_ + bytes_allocated_;
+ }
+
protected:
size_t bytes_allocated_;
uint8_t* memory_;
@@ -219,19 +224,52 @@ class ArenaAllocator : private DebugStackRefCounter, private ArenaAllocatorStats
return ret;
}
+ // Realloc never frees the input pointer, it is the caller's job to do this if necessary.
+ void* Realloc(void* ptr, size_t ptr_size, size_t new_size,
+ ArenaAllocKind kind = kArenaAllocMisc) ALWAYS_INLINE {
+ DCHECK_GE(new_size, ptr_size);
+ DCHECK_EQ(ptr == nullptr, ptr_size == 0u);
+ auto* end = reinterpret_cast<uint8_t*>(ptr) + ptr_size;
+ // If we haven't allocated anything else, we can safely extend.
+ if (end == ptr_) {
+ const size_t size_delta = new_size - ptr_size;
+ // Check remain space.
+ const size_t remain = end_ - ptr_;
+ if (remain >= size_delta) {
+ ptr_ += size_delta;
+ ArenaAllocatorStats::RecordAlloc(size_delta, kind);
+ return ptr;
+ }
+ }
+ auto* new_ptr = Alloc(new_size, kind);
+ memcpy(new_ptr, ptr, ptr_size);
+ // TODO: Call free on ptr if linear alloc supports free.
+ return new_ptr;
+ }
+
template <typename T>
T* AllocArray(size_t length, ArenaAllocKind kind = kArenaAllocMisc) {
return static_cast<T*>(Alloc(length * sizeof(T), kind));
}
void* AllocValgrind(size_t bytes, ArenaAllocKind kind);
+
void ObtainNewArenaForAllocation(size_t allocation_size);
+
size_t BytesAllocated() const;
+
MemStats GetMemStats() const;
+
// The BytesUsed method sums up bytes allocated from arenas in arena_head_ and nodes.
// TODO: Change BytesAllocated to this behavior?
size_t BytesUsed() const;
+ ArenaPool* GetArenaPool() const {
+ return pool_;
+ }
+
+ bool Contains(const void* ptr) const;
+
private:
static constexpr size_t kAlignment = 8;
diff --git a/runtime/base/iteration_range.h b/runtime/base/iteration_range.h
index 5a46376..6a0ef1f 100644
--- a/runtime/base/iteration_range.h
+++ b/runtime/base/iteration_range.h
@@ -17,6 +17,8 @@
#ifndef ART_RUNTIME_BASE_ITERATION_RANGE_H_
#define ART_RUNTIME_BASE_ITERATION_RANGE_H_
+#include <iterator>
+
namespace art {
// Helper class that acts as a container for range-based loops, given an iteration
@@ -38,10 +40,15 @@ class IterationRange {
iterator cend() const { return last_; }
private:
- iterator first_;
- iterator last_;
+ const iterator first_;
+ const iterator last_;
};
+template <typename Iter>
+static inline IterationRange<Iter> MakeIterationRange(const Iter& begin_it, const Iter& end_it) {
+ return IterationRange<Iter>(begin_it, end_it);
+}
+
} // namespace art
#endif // ART_RUNTIME_BASE_ITERATION_RANGE_H_
diff --git a/runtime/base/macros.h b/runtime/base/macros.h
index c00ae78..5c59647 100644
--- a/runtime/base/macros.h
+++ b/runtime/base/macros.h
@@ -50,7 +50,6 @@ friend class test_set_name##_##individual_test##_Test
#define ART_FRIEND_TYPED_TEST(test_set_name, individual_test)\
template<typename T> ART_FRIEND_TEST(test_set_name, individual_test)
-
// DISALLOW_COPY_AND_ASSIGN disallows the copy and operator= functions. It goes in the private:
// declarations in a class.
#if !defined(DISALLOW_COPY_AND_ASSIGN)
diff --git a/runtime/base/scoped_arena_containers.h b/runtime/base/scoped_arena_containers.h
index df79085..82db60e 100644
--- a/runtime/base/scoped_arena_containers.h
+++ b/runtime/base/scoped_arena_containers.h
@@ -20,6 +20,7 @@
#include <deque>
#include <queue>
#include <set>
+#include <unordered_map>
#include <vector>
#include "arena_containers.h" // For ArenaAllocatorAdapterKind.
@@ -55,6 +56,11 @@ template <typename K, typename V, typename Comparator = std::less<K>>
using ScopedArenaSafeMap =
SafeMap<K, V, Comparator, ScopedArenaAllocatorAdapter<std::pair<const K, V>>>;
+template <typename K, typename V, class Hash = std::hash<K>, class KeyEqual = std::equal_to<K>>
+using ScopedArenaUnorderedMap =
+ std::unordered_map<K, V, Hash, KeyEqual, ScopedArenaAllocatorAdapter<std::pair<const K, V>>>;
+
+
// Implementation details below.
template <>
diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc
index 30084d2..549eac2 100644
--- a/runtime/check_jni.cc
+++ b/runtime/check_jni.cc
@@ -20,6 +20,7 @@
#include <zlib.h>
#include "art_field-inl.h"
+#include "art_method-inl.h"
#include "base/logging.h"
#include "base/to_str.h"
#include "class_linker.h"
@@ -28,7 +29,6 @@
#include "gc/space/space.h"
#include "java_vm_ext.h"
#include "jni_internal.h"
-#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
@@ -200,7 +200,7 @@ class ScopedCheck {
bool CheckMethodAndSig(ScopedObjectAccess& soa, jobject jobj, jclass jc,
jmethodID mid, Primitive::Type type, InvokeType invoke)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod* m = CheckMethodID(soa, mid);
+ ArtMethod* m = CheckMethodID(soa, mid);
if (m == nullptr) {
return false;
}
@@ -270,7 +270,7 @@ class ScopedCheck {
*/
bool CheckStaticMethod(ScopedObjectAccess& soa, jclass java_class, jmethodID mid)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod* m = CheckMethodID(soa, mid);
+ ArtMethod* m = CheckMethodID(soa, mid);
if (m == nullptr) {
return false;
}
@@ -291,7 +291,7 @@ class ScopedCheck {
*/
bool CheckVirtualMethod(ScopedObjectAccess& soa, jobject java_object, jmethodID mid)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod* m = CheckMethodID(soa, mid);
+ ArtMethod* m = CheckMethodID(soa, mid);
if (m == nullptr) {
return false;
}
@@ -344,7 +344,7 @@ class ScopedCheck {
*/
bool Check(ScopedObjectAccess& soa, bool entry, const char* fmt, JniValueType* args)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod* traceMethod = nullptr;
+ ArtMethod* traceMethod = nullptr;
if (has_method_ && soa.Vm()->IsTracingEnabled()) {
// We need to guard some of the invocation interface's calls: a bad caller might
// use DetachCurrentThread or GetEnv on a thread that's not yet attached.
@@ -399,7 +399,7 @@ class ScopedCheck {
Thread* self = Thread::Current();
if ((flags_ & kFlag_Invocation) == 0 || self != nullptr) {
ScopedObjectAccess soa(self);
- mirror::ArtMethod* traceMethod = self->GetCurrentMethod(nullptr);
+ ArtMethod* traceMethod = self->GetCurrentMethod(nullptr);
should_trace = (traceMethod != nullptr && vm->ShouldTrace(traceMethod));
}
}
@@ -418,7 +418,7 @@ class ScopedCheck {
if (has_method_) {
Thread* self = Thread::Current();
ScopedObjectAccess soa(self);
- mirror::ArtMethod* traceMethod = self->GetCurrentMethod(nullptr);
+ ArtMethod* traceMethod = self->GetCurrentMethod(nullptr);
std::string methodName(PrettyMethod(traceMethod, false));
LOG(INFO) << "JNI: " << methodName << " -> " << function_name_ << "(" << msg << ")";
indent_ = methodName.size() + 1;
@@ -462,13 +462,13 @@ class ScopedCheck {
bool CheckConstructor(ScopedObjectAccess& soa, jmethodID mid)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod* method = soa.DecodeMethod(mid);
+ ArtMethod* method = soa.DecodeMethod(mid);
if (method == nullptr) {
AbortF("expected non-null constructor");
return false;
}
if (!method->IsConstructor() || method->IsStatic()) {
- AbortF("expected a constructor but %s: %p", PrettyTypeOf(method).c_str(), mid);
+ AbortF("expected a constructor but %s: %p", PrettyMethod(method).c_str(), mid);
return false;
}
return true;
@@ -825,7 +825,7 @@ class ScopedCheck {
}
case 'm': { // jmethodID
jmethodID mid = arg.m;
- mirror::ArtMethod* m = soa.DecodeMethod(mid);
+ ArtMethod* m = soa.DecodeMethod(mid);
*msg += PrettyMethod(m);
if (!entry) {
StringAppendF(msg, " (%p)", mid);
@@ -998,14 +998,15 @@ class ScopedCheck {
return f;
}
- mirror::ArtMethod* CheckMethodID(ScopedObjectAccess& soa, jmethodID mid)
+ ArtMethod* CheckMethodID(ScopedObjectAccess& soa, jmethodID mid)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (mid == nullptr) {
AbortF("jmethodID was NULL");
return nullptr;
}
- mirror::ArtMethod* m = soa.DecodeMethod(mid);
- if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(m) || !m->IsArtMethod()) {
+ ArtMethod* m = soa.DecodeMethod(mid);
+ // TODO: Better check here.
+ if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(m->GetDeclaringClass())) {
Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
AbortF("invalid jmethodID: %p", mid);
return nullptr;
diff --git a/runtime/check_reference_map_visitor.h b/runtime/check_reference_map_visitor.h
index d87a563..d323379 100644
--- a/runtime/check_reference_map_visitor.h
+++ b/runtime/check_reference_map_visitor.h
@@ -17,8 +17,8 @@
#ifndef ART_RUNTIME_CHECK_REFERENCE_MAP_VISITOR_H_
#define ART_RUNTIME_CHECK_REFERENCE_MAP_VISITOR_H_
+#include "art_method-inl.h"
#include "gc_map.h"
-#include "mirror/art_method-inl.h"
#include "scoped_thread_state_change.h"
#include "stack_map.h"
@@ -32,7 +32,7 @@ class CheckReferenceMapVisitor : public StackVisitor {
: StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}
bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod* m = GetMethod();
+ ArtMethod* m = GetMethod();
if (m->IsCalleeSaveMethod() || m->IsNative()) {
CHECK_EQ(GetDexPc(), DexFile::kDexNoIndex);
}
@@ -63,7 +63,7 @@ class CheckReferenceMapVisitor : public StackVisitor {
private:
void CheckOptimizedMethod(int* registers, int number_of_references, uint32_t native_pc_offset)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod* m = GetMethod();
+ ArtMethod* m = GetMethod();
CodeInfo code_info = m->GetOptimizedCodeInfo();
StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
uint16_t number_of_dex_registers = m->GetCodeItem()->registers_size_;
@@ -104,7 +104,7 @@ class CheckReferenceMapVisitor : public StackVisitor {
void CheckQuickMethod(int* registers, int number_of_references, uint32_t native_pc_offset)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod* m = GetMethod();
+ ArtMethod* m = GetMethod();
NativePcOffsetToReferenceMap map(m->GetNativeGcMap(sizeof(void*)));
const uint8_t* ref_bitmap = map.FindBitMap(native_pc_offset);
CHECK(ref_bitmap);
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index 1428749..df6703c 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -57,7 +57,7 @@ inline mirror::Class* ClassLinker::FindArrayClass(Thread* self, mirror::Class**
}
inline mirror::String* ClassLinker::ResolveString(uint32_t string_idx,
- mirror::ArtMethod* referrer) {
+ ArtMethod* referrer) {
mirror::Class* declaring_class = referrer->GetDeclaringClass();
mirror::String* resolved_string = declaring_class->GetDexCacheStrings()->Get(string_idx);
if (UNLIKELY(resolved_string == nullptr)) {
@@ -73,7 +73,7 @@ inline mirror::String* ClassLinker::ResolveString(uint32_t string_idx,
}
inline mirror::Class* ClassLinker::ResolveType(uint16_t type_idx,
- mirror::ArtMethod* referrer) {
+ ArtMethod* referrer) {
mirror::Class* resolved_type = referrer->GetDexCacheResolvedType(type_idx);
if (UNLIKELY(resolved_type == nullptr)) {
mirror::Class* declaring_class = referrer->GetDeclaringClass();
@@ -104,30 +104,27 @@ inline mirror::Class* ClassLinker::ResolveType(uint16_t type_idx, ArtField* refe
return resolved_type;
}
-inline mirror::ArtMethod* ClassLinker::GetResolvedMethod(uint32_t method_idx,
- mirror::ArtMethod* referrer) {
- mirror::ArtMethod* resolved_method = referrer->GetDexCacheResolvedMethod(method_idx);
+inline ArtMethod* ClassLinker::GetResolvedMethod(uint32_t method_idx, ArtMethod* referrer) {
+ ArtMethod* resolved_method = referrer->GetDexCacheResolvedMethod(
+ method_idx, image_pointer_size_);
if (resolved_method == nullptr || resolved_method->IsRuntimeMethod()) {
return nullptr;
}
return resolved_method;
}
-inline mirror::ArtMethod* ClassLinker::ResolveMethod(Thread* self, uint32_t method_idx,
- mirror::ArtMethod** referrer,
- InvokeType type) {
- mirror::ArtMethod* resolved_method = GetResolvedMethod(method_idx, *referrer);
- if (LIKELY(resolved_method != nullptr)) {
- return resolved_method;
+inline ArtMethod* ClassLinker::ResolveMethod(Thread* self, uint32_t method_idx,
+ ArtMethod* referrer, InvokeType type) {
+ ArtMethod* resolved_method = GetResolvedMethod(method_idx, referrer);
+ if (UNLIKELY(resolved_method == nullptr)) {
+ mirror::Class* declaring_class = referrer->GetDeclaringClass();
+ StackHandleScope<2> hs(self);
+ Handle<mirror::DexCache> h_dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
+ Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
+ const DexFile* dex_file = h_dex_cache->GetDexFile();
+ resolved_method = ResolveMethod(*dex_file, method_idx, h_dex_cache, h_class_loader, referrer,
+ type);
}
- mirror::Class* declaring_class = (*referrer)->GetDeclaringClass();
- StackHandleScope<3> hs(self);
- Handle<mirror::DexCache> h_dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
- Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
- HandleWrapper<mirror::ArtMethod> h_referrer(hs.NewHandleWrapper(referrer));
- const DexFile* dex_file = h_dex_cache->GetDexFile();
- resolved_method = ResolveMethod(*dex_file, method_idx, h_dex_cache, h_class_loader, h_referrer,
- type);
// Note: We cannot check here to see whether we added the method to the cache. It
// might be an erroneous class, which results in it being hidden from us.
return resolved_method;
@@ -142,8 +139,8 @@ inline ArtField* ClassLinker::GetResolvedField(
return GetResolvedField(field_idx, field_declaring_class->GetDexCache());
}
-inline ArtField* ClassLinker::ResolveField(uint32_t field_idx, mirror::ArtMethod* referrer,
- bool is_static) {
+inline ArtField* ClassLinker::ResolveField(uint32_t field_idx, ArtMethod* referrer,
+ bool is_static) {
mirror::Class* declaring_class = referrer->GetDeclaringClass();
ArtField* resolved_field = GetResolvedField(field_idx, declaring_class);
if (UNLIKELY(resolved_field == nullptr)) {
@@ -179,12 +176,6 @@ inline mirror::ObjectArray<mirror::String>* ClassLinker::AllocStringArray(Thread
length);
}
-inline mirror::ObjectArray<mirror::ArtMethod>* ClassLinker::AllocArtMethodArray(Thread* self,
- size_t length) {
- return mirror::ObjectArray<mirror::ArtMethod>::Alloc(self,
- GetClassRoot(kJavaLangReflectArtMethodArrayClass), length);
-}
-
inline mirror::IfTable* ClassLinker::AllocIfTable(Thread* self, size_t ifcount) {
return down_cast<mirror::IfTable*>(
mirror::IfTable::Alloc(self, GetClassRoot(kObjectArrayClass),
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index a028942..fb2debd 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -26,8 +26,11 @@
#include <vector>
#include "art_field-inl.h"
+#include "art_method-inl.h"
+#include "base/arena_allocator.h"
#include "base/casts.h"
#include "base/logging.h"
+#include "base/scoped_arena_containers.h"
#include "base/scoped_flock.h"
#include "base/stl_util.h"
#include "base/time_utils.h"
@@ -54,7 +57,6 @@
#include "oat_file.h"
#include "oat_file_assistant.h"
#include "object_lock.h"
-#include "mirror/art_method-inl.h"
#include "mirror/class.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
@@ -94,9 +96,9 @@ static void ThrowNoClassDefFoundError(const char* fmt, ...) {
va_end(args);
}
-static bool HasInitWithString(Thread* self, ClassLinker* class_linker, const char* descriptor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod* method = self->GetCurrentMethod(nullptr);
+bool ClassLinker::HasInitWithString(
+ Thread* self, ClassLinker* class_linker, const char* descriptor) {
+ ArtMethod* method = self->GetCurrentMethod(nullptr);
StackHandleScope<1> hs(self);
Handle<mirror::ClassLoader> class_loader(hs.NewHandle(method != nullptr ?
method->GetDeclaringClass()->GetClassLoader()
@@ -110,8 +112,8 @@ static bool HasInitWithString(Thread* self, ClassLinker* class_linker, const cha
return false;
}
- mirror::ArtMethod* exception_init_method =
- exception_class->FindDeclaredDirectMethod("<init>", "(Ljava/lang/String;)V");
+ ArtMethod* exception_init_method = exception_class->FindDeclaredDirectMethod(
+ "<init>", "(Ljava/lang/String;)V", image_pointer_size_);
return exception_init_method != nullptr;
}
@@ -275,46 +277,51 @@ ClassLinker::ClassLinker(InternTable* intern_table)
quick_to_interpreter_bridge_trampoline_(nullptr),
image_pointer_size_(sizeof(void*)) {
CHECK(intern_table_ != nullptr);
- for (size_t i = 0; i < kFindArrayCacheSize; ++i) {
- find_array_class_cache_[i] = GcRoot<mirror::Class>(nullptr);
+ for (auto& root : find_array_class_cache_) {
+ root = GcRoot<mirror::Class>(nullptr);
}
}
void ClassLinker::InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> boot_class_path) {
VLOG(startup) << "ClassLinker::Init";
- CHECK(!Runtime::Current()->GetHeap()->HasImageSpace()) << "Runtime has image. We should use it.";
+ Thread* const self = Thread::Current();
+ Runtime* const runtime = Runtime::Current();
+ gc::Heap* const heap = runtime->GetHeap();
+
+ CHECK(!heap->HasImageSpace()) << "Runtime has image. We should use it.";
CHECK(!init_done_);
+ // Use the pointer size from the runtime since we are probably creating the image.
+ image_pointer_size_ = InstructionSetPointerSize(runtime->GetInstructionSet());
+
// java_lang_Class comes first, it's needed for AllocClass
- Thread* const self = Thread::Current();
- gc::Heap* const heap = Runtime::Current()->GetHeap();
// The GC can't handle an object with a null class since we can't get the size of this object.
heap->IncrementDisableMovingGC(self);
StackHandleScope<64> hs(self); // 64 is picked arbitrarily.
+ auto class_class_size = mirror::Class::ClassClassSize(image_pointer_size_);
Handle<mirror::Class> java_lang_Class(hs.NewHandle(down_cast<mirror::Class*>(
- heap->AllocNonMovableObject<true>(self, nullptr,
- mirror::Class::ClassClassSize(),
- VoidFunctor()))));
+ heap->AllocNonMovableObject<true>(self, nullptr, class_class_size, VoidFunctor()))));
CHECK(java_lang_Class.Get() != nullptr);
mirror::Class::SetClassClass(java_lang_Class.Get());
java_lang_Class->SetClass(java_lang_Class.Get());
if (kUseBakerOrBrooksReadBarrier) {
java_lang_Class->AssertReadBarrierPointer();
}
- java_lang_Class->SetClassSize(mirror::Class::ClassClassSize());
+ java_lang_Class->SetClassSize(class_class_size);
java_lang_Class->SetPrimitiveType(Primitive::kPrimNot);
heap->DecrementDisableMovingGC(self);
// AllocClass(mirror::Class*) can now be used
// Class[] is used for reflection support.
+ auto class_array_class_size = mirror::ObjectArray<mirror::Class>::ClassSize(image_pointer_size_);
Handle<mirror::Class> class_array_class(hs.NewHandle(
- AllocClass(self, java_lang_Class.Get(), mirror::ObjectArray<mirror::Class>::ClassSize())));
+ AllocClass(self, java_lang_Class.Get(), class_array_class_size)));
class_array_class->SetComponentType(java_lang_Class.Get());
// java_lang_Object comes next so that object_array_class can be created.
Handle<mirror::Class> java_lang_Object(hs.NewHandle(
- AllocClass(self, java_lang_Class.Get(), mirror::Object::ClassSize())));
+ AllocClass(self, java_lang_Class.Get(), mirror::Object::ClassSize(image_pointer_size_))));
CHECK(java_lang_Object.Get() != nullptr);
// backfill Object as the super class of Class.
java_lang_Class->SetSuperClass(java_lang_Object.Get());
@@ -322,12 +329,14 @@ void ClassLinker::InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> b
// Object[] next to hold class roots.
Handle<mirror::Class> object_array_class(hs.NewHandle(
- AllocClass(self, java_lang_Class.Get(), mirror::ObjectArray<mirror::Object>::ClassSize())));
+ AllocClass(self, java_lang_Class.Get(),
+ mirror::ObjectArray<mirror::Object>::ClassSize(image_pointer_size_))));
object_array_class->SetComponentType(java_lang_Object.Get());
// Setup the char (primitive) class to be used for char[].
Handle<mirror::Class> char_class(hs.NewHandle(
- AllocClass(self, java_lang_Class.Get(), mirror::Class::PrimitiveClassSize())));
+ AllocClass(self, java_lang_Class.Get(),
+ mirror::Class::PrimitiveClassSize(image_pointer_size_))));
// The primitive char class won't be initialized by
// InitializePrimitiveClass until line 459, but strings (and
// internal char arrays) will be allocated before that and the
@@ -337,21 +346,20 @@ void ClassLinker::InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> b
// Setup the char[] class to be used for String.
Handle<mirror::Class> char_array_class(hs.NewHandle(
- AllocClass(self, java_lang_Class.Get(),
- mirror::Array::ClassSize())));
+ AllocClass(self, java_lang_Class.Get(), mirror::Array::ClassSize(image_pointer_size_))));
char_array_class->SetComponentType(char_class.Get());
mirror::CharArray::SetArrayClass(char_array_class.Get());
// Setup String.
Handle<mirror::Class> java_lang_String(hs.NewHandle(
- AllocClass(self, java_lang_Class.Get(), mirror::String::ClassSize())));
+ AllocClass(self, java_lang_Class.Get(), mirror::String::ClassSize(image_pointer_size_))));
mirror::String::SetClass(java_lang_String.Get());
mirror::Class::SetStatus(java_lang_String, mirror::Class::kStatusResolved, self);
java_lang_String->SetStringClass();
// Setup java.lang.ref.Reference.
Handle<mirror::Class> java_lang_ref_Reference(hs.NewHandle(
- AllocClass(self, java_lang_Class.Get(), mirror::Reference::ClassSize())));
+ AllocClass(self, java_lang_Class.Get(), mirror::Reference::ClassSize(image_pointer_size_))));
mirror::Reference::SetClass(java_lang_ref_Reference.Get());
java_lang_ref_Reference->SetObjectSize(mirror::Reference::InstanceSize());
mirror::Class::SetStatus(java_lang_ref_Reference, mirror::Class::kStatusResolved, self);
@@ -384,14 +392,14 @@ void ClassLinker::InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> b
// Create int array type for AllocDexCache (done in AppendToBootClassPath).
Handle<mirror::Class> int_array_class(hs.NewHandle(
- AllocClass(self, java_lang_Class.Get(), mirror::Array::ClassSize())));
+ AllocClass(self, java_lang_Class.Get(), mirror::Array::ClassSize(image_pointer_size_))));
int_array_class->SetComponentType(GetClassRoot(kPrimitiveInt));
mirror::IntArray::SetArrayClass(int_array_class.Get());
SetClassRoot(kIntArrayClass, int_array_class.Get());
// Create long array type for AllocDexCache (done in AppendToBootClassPath).
Handle<mirror::Class> long_array_class(hs.NewHandle(
- AllocClass(self, java_lang_Class.Get(), mirror::Array::ClassSize())));
+ AllocClass(self, java_lang_Class.Get(), mirror::Array::ClassSize(image_pointer_size_))));
long_array_class->SetComponentType(GetClassRoot(kPrimitiveLong));
mirror::LongArray::SetArrayClass(long_array_class.Get());
SetClassRoot(kLongArrayClass, long_array_class.Get());
@@ -400,35 +408,22 @@ void ClassLinker::InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> b
// Set up DexCache. This cannot be done later since AppendToBootClassPath calls AllocDexCache.
Handle<mirror::Class> java_lang_DexCache(hs.NewHandle(
- AllocClass(self, java_lang_Class.Get(), mirror::DexCache::ClassSize())));
+ AllocClass(self, java_lang_Class.Get(), mirror::DexCache::ClassSize(image_pointer_size_))));
SetClassRoot(kJavaLangDexCache, java_lang_DexCache.Get());
java_lang_DexCache->SetObjectSize(mirror::DexCache::InstanceSize());
mirror::Class::SetStatus(java_lang_DexCache, mirror::Class::kStatusResolved, self);
- // Constructor, Method, and AbstractMethod are necessary so
- // that FindClass can link members.
-
- Handle<mirror::Class> java_lang_reflect_ArtMethod(hs.NewHandle(
- AllocClass(self, java_lang_Class.Get(), mirror::ArtMethod::ClassSize())));
- CHECK(java_lang_reflect_ArtMethod.Get() != nullptr);
- size_t pointer_size = GetInstructionSetPointerSize(Runtime::Current()->GetInstructionSet());
- java_lang_reflect_ArtMethod->SetObjectSize(mirror::ArtMethod::InstanceSize(pointer_size));
- SetClassRoot(kJavaLangReflectArtMethod, java_lang_reflect_ArtMethod.Get());
- mirror::Class::SetStatus(java_lang_reflect_ArtMethod, mirror::Class::kStatusResolved, self);
- mirror::ArtMethod::SetClass(java_lang_reflect_ArtMethod.Get());
-
// Set up array classes for string, field, method
Handle<mirror::Class> object_array_string(hs.NewHandle(
AllocClass(self, java_lang_Class.Get(),
- mirror::ObjectArray<mirror::String>::ClassSize())));
+ mirror::ObjectArray<mirror::String>::ClassSize(image_pointer_size_))));
object_array_string->SetComponentType(java_lang_String.Get());
SetClassRoot(kJavaLangStringArrayClass, object_array_string.Get());
- Handle<mirror::Class> object_array_art_method(hs.NewHandle(
- AllocClass(self, java_lang_Class.Get(),
- mirror::ObjectArray<mirror::ArtMethod>::ClassSize())));
- object_array_art_method->SetComponentType(java_lang_reflect_ArtMethod.Get());
- SetClassRoot(kJavaLangReflectArtMethodArrayClass, object_array_art_method.Get());
+ // Create runtime resolution and imt conflict methods.
+ runtime->SetResolutionMethod(runtime->CreateResolutionMethod());
+ runtime->SetImtConflictMethod(runtime->CreateImtConflictMethod());
+ runtime->SetImtUnimplementedMethod(runtime->CreateImtConflictMethod());
// Setup boot_class_path_ and register class_path now that we can use AllocObjectArray to create
// DexCache instances. Needs to be after String, Field, Method arrays since AllocDexCache uses
@@ -446,13 +441,6 @@ void ClassLinker::InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> b
InitializePrimitiveClass(char_class.Get(), Primitive::kPrimChar);
SetClassRoot(kPrimitiveChar, char_class.Get()); // needs descriptor
- // Create runtime resolution and imt conflict methods. Also setup the default imt.
- Runtime* runtime = Runtime::Current();
- runtime->SetResolutionMethod(runtime->CreateResolutionMethod());
- runtime->SetImtConflictMethod(runtime->CreateImtConflictMethod());
- runtime->SetImtUnimplementedMethod(runtime->CreateImtConflictMethod());
- runtime->SetDefaultImt(runtime->CreateDefaultImt(this));
-
// Set up GenericJNI entrypoint. That is mainly a hack for common_compiler_test.h so that
// we do not need friend classes or a publicly exposed setter.
quick_generic_jni_trampoline_ = GetQuickGenericJniStub();
@@ -529,13 +517,8 @@ void ClassLinker::InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> b
// dex_cache_ fields and register them in class_table_.
CHECK_EQ(java_lang_Class.Get(), FindSystemClass(self, "Ljava/lang/Class;"));
- mirror::Class::SetStatus(java_lang_reflect_ArtMethod, mirror::Class::kStatusNotReady, self);
- CHECK_EQ(java_lang_reflect_ArtMethod.Get(),
- FindSystemClass(self, "Ljava/lang/reflect/ArtMethod;"));
CHECK_EQ(object_array_string.Get(),
FindSystemClass(self, GetClassRootDescriptor(kJavaLangStringArrayClass)));
- CHECK_EQ(object_array_art_method.Get(),
- FindSystemClass(self, GetClassRootDescriptor(kJavaLangReflectArtMethodArrayClass)));
// End of special init trickery, subsequent classes may be loaded via FindSystemClass.
@@ -579,7 +562,8 @@ void ClassLinker::InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> b
mirror::Class::SetStatus(java_lang_ref_Reference, mirror::Class::kStatusNotReady, self);
CHECK_EQ(java_lang_ref_Reference.Get(), FindSystemClass(self, "Ljava/lang/ref/Reference;"));
CHECK_EQ(java_lang_ref_Reference->GetObjectSize(), mirror::Reference::InstanceSize());
- CHECK_EQ(java_lang_ref_Reference->GetClassSize(), mirror::Reference::ClassSize());
+ CHECK_EQ(java_lang_ref_Reference->GetClassSize(),
+ mirror::Reference::ClassSize(image_pointer_size_));
class_root = FindSystemClass(self, "Ljava/lang/ref/FinalizerReference;");
class_root->SetAccessFlags(class_root->GetAccessFlags() |
kAccClassIsReference | kAccClassIsFinalizerReference);
@@ -1027,24 +1011,41 @@ const OatFile* ClassLinker::FindOpenedOatFileFromOatLocation(const std::string&
return nullptr;
}
-void ClassLinker::InitFromImageInterpretOnlyCallback(mirror::Object* obj, void* arg) {
- ClassLinker* class_linker = reinterpret_cast<ClassLinker*>(arg);
- DCHECK(obj != nullptr);
- DCHECK(class_linker != nullptr);
- if (obj->IsArtMethod()) {
- mirror::ArtMethod* method = obj->AsArtMethod();
- if (!method->IsNative()) {
- const size_t pointer_size = class_linker->image_pointer_size_;
- method->SetEntryPointFromInterpreterPtrSize(artInterpreterToInterpreterBridge, pointer_size);
- if (!method->IsRuntimeMethod() && method != Runtime::Current()->GetResolutionMethod()) {
- method->SetEntryPointFromQuickCompiledCodePtrSize(GetQuickToInterpreterBridge(),
- pointer_size);
- }
+static void SanityCheckArtMethod(ArtMethod* m, mirror::Class* expected_class,
+ gc::space::ImageSpace* space)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (m->IsRuntimeMethod()) {
+ CHECK(m->GetDeclaringClass() == nullptr) << PrettyMethod(m);
+ } else if (m->IsMiranda()) {
+ CHECK(m->GetDeclaringClass() != nullptr) << PrettyMethod(m);
+ } else if (expected_class != nullptr) {
+ CHECK_EQ(m->GetDeclaringClassUnchecked(), expected_class) << PrettyMethod(m);
+ }
+ if (space != nullptr) {
+ auto& header = space->GetImageHeader();
+ auto& methods = header.GetMethodsSection();
+ auto offset = reinterpret_cast<uint8_t*>(m) - space->Begin();
+ CHECK(methods.Contains(offset)) << m << " not in " << methods;
+ }
+}
+
+static void SanityCheckArtMethodPointerArray(
+ mirror::PointerArray* arr, mirror::Class* expected_class, size_t pointer_size,
+ gc::space::ImageSpace* space) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ CHECK(arr != nullptr);
+ for (int32_t j = 0; j < arr->GetLength(); ++j) {
+ auto* method = arr->GetElementPtrSize<ArtMethod*>(j, pointer_size);
+ // expected_class == null means we are a dex cache.
+ if (expected_class != nullptr) {
+ CHECK(method != nullptr);
+ }
+ if (method != nullptr) {
+ SanityCheckArtMethod(method, expected_class, space);
}
}
}
-void SanityCheckObjectsCallback(mirror::Object* obj, void* arg ATTRIBUTE_UNUSED)
+static void SanityCheckObjectsCallback(mirror::Object* obj, void* arg ATTRIBUTE_UNUSED)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(obj != nullptr);
CHECK(obj->GetClass() != nullptr) << "Null class " << obj;
@@ -1058,6 +1059,36 @@ void SanityCheckObjectsCallback(mirror::Object* obj, void* arg ATTRIBUTE_UNUSED)
CHECK_EQ(fields[i][j].GetDeclaringClass(), klass);
}
}
+ auto* runtime = Runtime::Current();
+ auto* image_space = runtime->GetHeap()->GetImageSpace();
+ auto pointer_size = runtime->GetClassLinker()->GetImagePointerSize();
+ for (auto& m : klass->GetDirectMethods(pointer_size)) {
+ SanityCheckArtMethod(&m, klass, image_space);
+ }
+ for (auto& m : klass->GetVirtualMethods(pointer_size)) {
+ SanityCheckArtMethod(&m, klass, image_space);
+ }
+ auto* vtable = klass->GetVTable();
+ if (vtable != nullptr) {
+ SanityCheckArtMethodPointerArray(vtable, nullptr, pointer_size, image_space);
+ }
+ if (klass->ShouldHaveEmbeddedImtAndVTable()) {
+ for (size_t i = 0; i < mirror::Class::kImtSize; ++i) {
+ SanityCheckArtMethod(klass->GetEmbeddedImTableEntry(i, pointer_size), nullptr, image_space);
+ }
+ for (int32_t i = 0; i < klass->GetEmbeddedVTableLength(); ++i) {
+ SanityCheckArtMethod(klass->GetEmbeddedVTableEntry(i, pointer_size), nullptr, image_space);
+ }
+ }
+ auto* iftable = klass->GetIfTable();
+ if (iftable != nullptr) {
+ for (int32_t i = 0; i < klass->GetIfTableCount(); ++i) {
+ if (iftable->GetMethodArrayCount(i) > 0) {
+ SanityCheckArtMethodPointerArray(iftable->GetMethodArray(i), nullptr, pointer_size,
+ image_space);
+ }
+ }
+ }
}
}
@@ -1069,8 +1100,9 @@ void ClassLinker::InitFromImage() {
Thread* const self = Thread::Current();
gc::Heap* const heap = runtime->GetHeap();
gc::space::ImageSpace* const space = heap->GetImageSpace();
- dex_cache_image_class_lookup_required_ = true;
CHECK(space != nullptr);
+ image_pointer_size_ = space->GetImageHeader().GetPointerSize();
+ dex_cache_image_class_lookup_required_ = true;
OatFile& oat_file = GetImageOatFile(space);
CHECK_EQ(oat_file.GetOatHeader().GetImageFileLocationOatChecksum(), 0U);
CHECK_EQ(oat_file.GetOatHeader().GetImageFileLocationOatDataBegin(), 0U);
@@ -1113,34 +1145,28 @@ void ClassLinker::InitFromImage() {
UNREACHABLE();
}
+ if (kSanityCheckObjects) {
+ SanityCheckArtMethodPointerArray(dex_cache->GetResolvedMethods(), nullptr,
+ image_pointer_size_, space);
+ }
+
CHECK_EQ(dex_file->GetLocationChecksum(), oat_dex_file->GetDexFileLocationChecksum());
AppendToBootClassPath(*dex_file.get(), dex_cache);
opened_dex_files_.push_back(std::move(dex_file));
}
+ CHECK(ValidPointerSize(image_pointer_size_)) << image_pointer_size_;
+
// Set classes on AbstractMethod early so that IsMethod tests can be performed during the live
// bitmap walk.
- mirror::ArtMethod::SetClass(GetClassRoot(kJavaLangReflectArtMethod));
- size_t art_method_object_size = mirror::ArtMethod::GetJavaLangReflectArtMethod()->GetObjectSize();
if (!runtime->IsAotCompiler()) {
- // Aot compiler supports having an image with a different pointer size than the runtime. This
- // happens on the host for compile 32 bit tests since we use a 64 bit libart compiler. We may
- // also use 32 bit dex2oat on a system with 64 bit apps.
- CHECK_EQ(art_method_object_size, mirror::ArtMethod::InstanceSize(sizeof(void*)))
- << sizeof(void*);
- }
- if (art_method_object_size == mirror::ArtMethod::InstanceSize(4)) {
- image_pointer_size_ = 4;
- } else {
- CHECK_EQ(art_method_object_size, mirror::ArtMethod::InstanceSize(8));
- image_pointer_size_ = 8;
+ // Only the Aot compiler supports having an image with a different pointer size than the
+ // runtime. This happens on the host for compile 32 bit tests since we use a 64 bit libart
+ // compiler. We may also use 32 bit dex2oat on a system with 64 bit apps.
+ CHECK_EQ(image_pointer_size_, sizeof(void*));
}
- // Set entry point to interpreter if in InterpretOnly mode.
- if (!runtime->IsAotCompiler() && runtime->GetInstrumentation()->InterpretOnly()) {
- heap->VisitObjects(InitFromImageInterpretOnlyCallback, this);
- }
if (kSanityCheckObjects) {
for (int32_t i = 0; i < dex_caches->GetLength(); i++) {
auto* dex_cache = dex_caches->Get(i);
@@ -1154,6 +1180,27 @@ void ClassLinker::InitFromImage() {
heap->VisitObjects(SanityCheckObjectsCallback, nullptr);
}
+ // Set entry point to interpreter if in InterpretOnly mode.
+ if (!runtime->IsAotCompiler() && runtime->GetInstrumentation()->InterpretOnly()) {
+ const auto& header = space->GetImageHeader();
+ const auto& methods = header.GetMethodsSection();
+ const auto art_method_size = ArtMethod::ObjectSize(image_pointer_size_);
+ for (uintptr_t pos = 0; pos < methods.Size(); pos += art_method_size) {
+ auto* method = reinterpret_cast<ArtMethod*>(space->Begin() + pos + methods.Offset());
+ if (kIsDebugBuild && !method->IsRuntimeMethod()) {
+ CHECK(method->GetDeclaringClass() != nullptr);
+ }
+ if (!method->IsNative()) {
+ method->SetEntryPointFromInterpreterPtrSize(
+ artInterpreterToInterpreterBridge, image_pointer_size_);
+ if (!method->IsRuntimeMethod() && method != runtime->GetResolutionMethod()) {
+ method->SetEntryPointFromQuickCompiledCodePtrSize(GetQuickToInterpreterBridge(),
+ image_pointer_size_);
+ }
+ }
+ }
+ }
+
// reinit class_roots_
mirror::Class::SetClassClass(class_roots->Get(kJavaLangClass));
class_roots_ = GcRoot<mirror::ObjectArray<mirror::Class>>(class_roots.Get());
@@ -1185,24 +1232,55 @@ void ClassLinker::InitFromImage() {
VLOG(startup) << "ClassLinker::InitFromImage exiting";
}
+bool ClassLinker::ClassInClassTable(mirror::Class* klass) {
+ ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
+ auto it = class_table_.Find(GcRoot<mirror::Class>(klass));
+ if (it == class_table_.end()) {
+ return false;
+ }
+ return it->Read() == klass;
+}
+
void ClassLinker::VisitClassRoots(RootVisitor* visitor, VisitRootFlags flags) {
WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(
visitor, RootInfo(kRootStickyClass));
if ((flags & kVisitRootFlagAllRoots) != 0) {
+ // Argument for how root visiting deals with ArtField and ArtMethod roots.
+ // There is 3 GC cases to handle:
+ // Non moving concurrent:
+ // This case is easy to handle since the reference members of ArtMethod and ArtFields are held
+ // live by the class and class roots. In this case we probably don't even need to call
+ // VisitNativeRoots.
+ //
+ // Moving non-concurrent:
+ // This case needs to call visit VisitNativeRoots in case the classes or dex cache arrays move.
+ // To prevent missing roots, this case needs to ensure that there is no
+ // suspend points between the point which we allocate ArtMethod arrays and place them in a
+ // class which is in the class table.
+ //
+ // Moving concurrent:
+ // Need to make sure to not copy ArtMethods without doing read barriers since the roots are
+ // marked concurrently and we don't hold the classlinker_classes_lock_ when we do the copy.
for (GcRoot<mirror::Class>& root : class_table_) {
buffered_visitor.VisitRoot(root);
- root.Read()->VisitFieldRoots(buffered_visitor);
+ if ((flags & kVisitRootFlagNonMoving) == 0) {
+ // Don't bother visiting ArtField and ArtMethod if kVisitRootFlagNonMoving is set since
+ // these roots are all reachable from the class or dex cache.
+ root.Read()->VisitNativeRoots(buffered_visitor, image_pointer_size_);
+ }
}
// PreZygote classes can't move so we won't need to update fields' declaring classes.
for (GcRoot<mirror::Class>& root : pre_zygote_class_table_) {
buffered_visitor.VisitRoot(root);
- root.Read()->VisitFieldRoots(buffered_visitor);
+ if ((flags & kVisitRootFlagNonMoving) == 0) {
+ root.Read()->VisitNativeRoots(buffered_visitor, image_pointer_size_);
+ }
}
} else if ((flags & kVisitRootFlagNewRoots) != 0) {
for (auto& root : new_class_roots_) {
mirror::Class* old_ref = root.Read<kWithoutReadBarrier>();
- old_ref->VisitFieldRoots(buffered_visitor);
+ old_ref->VisitNativeRoots(buffered_visitor, image_pointer_size_);
root.VisitRoot(visitor, RootInfo(kRootStickyClass));
mirror::Class* new_ref = root.Read<kWithoutReadBarrier>();
if (UNLIKELY(new_ref != old_ref)) {
@@ -1353,7 +1431,6 @@ void ClassLinker::VisitClassesWithoutClassesLock(ClassVisitor* visitor, void* ar
}
ClassLinker::~ClassLinker() {
- mirror::ArtMethod::ResetClass();
mirror::Class::ResetClass();
mirror::Constructor::ResetClass();
mirror::Field::ResetClass();
@@ -1376,48 +1453,47 @@ ClassLinker::~ClassLinker() {
STLDeleteElements(&oat_files_);
}
+mirror::PointerArray* ClassLinker::AllocPointerArray(Thread* self, size_t length) {
+ return down_cast<mirror::PointerArray*>(image_pointer_size_ == 8u ?
+ static_cast<mirror::Array*>(mirror::LongArray::Alloc(self, length)) :
+ static_cast<mirror::Array*>(mirror::IntArray::Alloc(self, length)));
+}
+
mirror::DexCache* ClassLinker::AllocDexCache(Thread* self, const DexFile& dex_file) {
- gc::Heap* const heap = Runtime::Current()->GetHeap();
- StackHandleScope<16> hs(self);
- Handle<mirror::Class> dex_cache_class(hs.NewHandle(GetClassRoot(kJavaLangDexCache)));
- Handle<mirror::DexCache> dex_cache(
- hs.NewHandle(down_cast<mirror::DexCache*>(
- heap->AllocObject<true>(self, dex_cache_class.Get(), dex_cache_class->GetObjectSize(),
- VoidFunctor()))));
+ StackHandleScope<6> hs(self);
+ auto dex_cache(hs.NewHandle(down_cast<mirror::DexCache*>(
+ GetClassRoot(kJavaLangDexCache)->AllocObject(self))));
if (dex_cache.Get() == nullptr) {
+ self->AssertPendingOOMException();
return nullptr;
}
- Handle<mirror::String>
- location(hs.NewHandle(intern_table_->InternStrong(dex_file.GetLocation().c_str())));
+ auto location(hs.NewHandle(intern_table_->InternStrong(dex_file.GetLocation().c_str())));
if (location.Get() == nullptr) {
+ self->AssertPendingOOMException();
return nullptr;
}
- Handle<mirror::ObjectArray<mirror::String>>
- strings(hs.NewHandle(AllocStringArray(self, dex_file.NumStringIds())));
+ auto strings(hs.NewHandle(AllocStringArray(self, dex_file.NumStringIds())));
if (strings.Get() == nullptr) {
+ self->AssertPendingOOMException();
return nullptr;
}
- Handle<mirror::ObjectArray<mirror::Class>>
- types(hs.NewHandle(AllocClassArray(self, dex_file.NumTypeIds())));
+ auto types(hs.NewHandle(AllocClassArray(self, dex_file.NumTypeIds())));
if (types.Get() == nullptr) {
+ self->AssertPendingOOMException();
return nullptr;
}
- Handle<mirror::ObjectArray<mirror::ArtMethod>>
- methods(hs.NewHandle(AllocArtMethodArray(self, dex_file.NumMethodIds())));
+ auto methods(hs.NewHandle(AllocPointerArray(self, dex_file.NumMethodIds())));
if (methods.Get() == nullptr) {
+ self->AssertPendingOOMException();
return nullptr;
}
- Handle<mirror::Array> fields;
- if (image_pointer_size_ == 8) {
- fields = hs.NewHandle<mirror::Array>(mirror::LongArray::Alloc(self, dex_file.NumFieldIds()));
- } else {
- fields = hs.NewHandle<mirror::Array>(mirror::IntArray::Alloc(self, dex_file.NumFieldIds()));
- }
+ auto fields(hs.NewHandle(AllocPointerArray(self, dex_file.NumFieldIds())));
if (fields.Get() == nullptr) {
+ self->AssertPendingOOMException();
return nullptr;
}
dex_cache->Init(&dex_file, location.Get(), strings.Get(), types.Get(), methods.Get(),
- fields.Get());
+ fields.Get(), image_pointer_size_);
return dex_cache.Get();
}
@@ -1430,7 +1506,7 @@ mirror::Class* ClassLinker::AllocClass(Thread* self, mirror::Class* java_lang_Cl
heap->AllocObject<true>(self, java_lang_Class, class_size, visitor) :
heap->AllocNonMovableObject<true>(self, java_lang_Class, class_size, visitor);
if (UNLIKELY(k == nullptr)) {
- CHECK(self->IsExceptionPending()); // OOME.
+ self->AssertPendingOOMException();
return nullptr;
}
return k->AsClass();
@@ -1440,11 +1516,6 @@ mirror::Class* ClassLinker::AllocClass(Thread* self, uint32_t class_size) {
return AllocClass(self, GetClassRoot(kJavaLangClass), class_size);
}
-mirror::ArtMethod* ClassLinker::AllocArtMethod(Thread* self) {
- return down_cast<mirror::ArtMethod*>(
- GetClassRoot(kJavaLangReflectArtMethod)->AllocNonMovableObject(self));
-}
-
mirror::ObjectArray<mirror::StackTraceElement>* ClassLinker::AllocStackTraceElementArray(
Thread* self, size_t length) {
return mirror::ObjectArray<mirror::StackTraceElement>::Alloc(
@@ -1749,8 +1820,6 @@ mirror::Class* ClassLinker::DefineClass(Thread* self, const char* descriptor, si
klass.Assign(GetClassRoot(kJavaLangRefReference));
} else if (strcmp(descriptor, "Ljava/lang/DexCache;") == 0) {
klass.Assign(GetClassRoot(kJavaLangDexCache));
- } else if (strcmp(descriptor, "Ljava/lang/reflect/ArtMethod;") == 0) {
- klass.Assign(GetClassRoot(kJavaLangReflectArtMethod));
}
}
@@ -1896,7 +1965,8 @@ uint32_t ClassLinker::SizeOfClassWithoutEmbeddedTables(const DexFile& dex_file,
}
}
}
- return mirror::Class::ComputeClassSize(false, 0, num_8, num_16, num_32, num_64, num_ref);
+ return mirror::Class::ComputeClassSize(false, 0, num_8, num_16, num_32, num_64, num_ref,
+ image_pointer_size_);
}
OatFile::OatClass ClassLinker::FindOatClass(const DexFile& dex_file, uint16_t class_def_idx,
@@ -1945,7 +2015,7 @@ static uint32_t GetOatMethodIndexFromMethodIndex(const DexFile& dex_file, uint16
UNREACHABLE();
}
-const OatFile::OatMethod ClassLinker::FindOatMethodFor(mirror::ArtMethod* method, bool* found) {
+const OatFile::OatMethod ClassLinker::FindOatMethodFor(ArtMethod* method, bool* found) {
// Although we overwrite the trampoline of non-static methods, we may get here via the resolution
// method for direct methods (or virtual methods made direct).
mirror::Class* declaring_class = method->GetDeclaringClass();
@@ -1962,7 +2032,7 @@ const OatFile::OatMethod ClassLinker::FindOatMethodFor(mirror::ArtMethod* method
for (size_t i = 0; i < end; i++) {
// Check method index instead of identity in case of duplicate method definitions.
if (method->GetDexMethodIndex() ==
- declaring_class->GetVirtualMethod(i)->GetDexMethodIndex()) {
+ declaring_class->GetVirtualMethod(i, image_pointer_size_)->GetDexMethodIndex()) {
found_virtual = true;
break;
}
@@ -1985,7 +2055,7 @@ const OatFile::OatMethod ClassLinker::FindOatMethodFor(mirror::ArtMethod* method
}
// Special case to get oat code without overwriting a trampoline.
-const void* ClassLinker::GetQuickOatCodeFor(mirror::ArtMethod* method) {
+const void* ClassLinker::GetQuickOatCodeFor(ArtMethod* method) {
CHECK(!method->IsAbstract()) << PrettyMethod(method);
if (method->IsProxyMethod()) {
return GetQuickProxyInvokeHandler();
@@ -2012,7 +2082,7 @@ const void* ClassLinker::GetQuickOatCodeFor(mirror::ArtMethod* method) {
return GetQuickToInterpreterBridge();
}
-const void* ClassLinker::GetOatMethodQuickCodeFor(mirror::ArtMethod* method) {
+const void* ClassLinker::GetOatMethodQuickCodeFor(ArtMethod* method) {
if (method->IsNative() || method->IsAbstract() || method->IsProxyMethod()) {
return nullptr;
}
@@ -2043,7 +2113,7 @@ const void* ClassLinker::GetQuickOatCodeFor(const DexFile& dex_file, uint16_t cl
}
// Returns true if the method must run with interpreter, false otherwise.
-static bool NeedsInterpreter(mirror::ArtMethod* method, const void* quick_code)
+static bool NeedsInterpreter(ArtMethod* method, const void* quick_code)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (quick_code == nullptr) {
// No code: need interpreter.
@@ -2088,7 +2158,7 @@ void ClassLinker::FixupStaticTrampolines(mirror::Class* klass) {
&has_oat_class);
// Link the code of methods skipped by LinkCode.
for (size_t method_index = 0; it.HasNextDirectMethod(); ++method_index, it.Next()) {
- mirror::ArtMethod* method = klass->GetDirectMethod(method_index);
+ ArtMethod* method = klass->GetDirectMethod(method_index, image_pointer_size_);
if (!method->IsStatic()) {
// Only update static methods.
continue;
@@ -2113,10 +2183,9 @@ void ClassLinker::FixupStaticTrampolines(mirror::Class* klass) {
// Ignore virtual methods on the iterator.
}
-void ClassLinker::LinkCode(Handle<mirror::ArtMethod> method,
- const OatFile::OatClass* oat_class,
+void ClassLinker::LinkCode(ArtMethod* method, const OatFile::OatClass* oat_class,
uint32_t class_def_method_index) {
- Runtime* runtime = Runtime::Current();
+ Runtime* const runtime = Runtime::Current();
if (runtime->IsAotCompiler()) {
// The following code only applies to a non-compiler runtime.
return;
@@ -2127,12 +2196,11 @@ void ClassLinker::LinkCode(Handle<mirror::ArtMethod> method,
// Every kind of method should at least get an invoke stub from the oat_method.
// non-abstract methods also get their code pointers.
const OatFile::OatMethod oat_method = oat_class->GetOatMethod(class_def_method_index);
- oat_method.LinkMethod(method.Get());
+ oat_method.LinkMethod(method);
}
// Install entry point from interpreter.
- bool enter_interpreter = NeedsInterpreter(method.Get(),
- method->GetEntryPointFromQuickCompiledCode());
+ bool enter_interpreter = NeedsInterpreter(method, method->GetEntryPointFromQuickCompiledCode());
if (enter_interpreter && !method->IsNative()) {
method->SetEntryPointFromInterpreter(artInterpreterToInterpreterBridge);
} else {
@@ -2221,93 +2289,83 @@ ArtField* ClassLinker::AllocArtFieldArray(Thread* self, size_t length) {
return ptr;
}
+ArtMethod* ClassLinker::AllocArtMethodArray(Thread* self, size_t length) {
+ const size_t method_size = ArtMethod::ObjectSize(image_pointer_size_);
+ uintptr_t ptr = reinterpret_cast<uintptr_t>(
+ Runtime::Current()->GetLinearAlloc()->Alloc(self, method_size * length));
+ CHECK_NE(ptr, 0u);
+ for (size_t i = 0; i < length; ++i) {
+ new(reinterpret_cast<void*>(ptr + i * method_size)) ArtMethod;
+ }
+ return reinterpret_cast<ArtMethod*>(ptr);
+}
+
void ClassLinker::LoadClassMembers(Thread* self, const DexFile& dex_file,
const uint8_t* class_data,
Handle<mirror::Class> klass,
const OatFile::OatClass* oat_class) {
- // Load static fields.
- ClassDataItemIterator it(dex_file, class_data);
- const size_t num_sfields = it.NumStaticFields();
- ArtField* sfields = num_sfields != 0 ? AllocArtFieldArray(self, num_sfields) : nullptr;
- for (size_t i = 0; it.HasNextStaticField(); i++, it.Next()) {
- CHECK_LT(i, num_sfields);
- LoadField(it, klass, &sfields[i]);
- }
- klass->SetSFields(sfields);
- klass->SetNumStaticFields(num_sfields);
- DCHECK_EQ(klass->NumStaticFields(), num_sfields);
- // Load instance fields.
- const size_t num_ifields = it.NumInstanceFields();
- ArtField* ifields = num_ifields != 0 ? AllocArtFieldArray(self, num_ifields) : nullptr;
- for (size_t i = 0; it.HasNextInstanceField(); i++, it.Next()) {
- CHECK_LT(i, num_ifields);
- LoadField(it, klass, &ifields[i]);
- }
- klass->SetIFields(ifields);
- klass->SetNumInstanceFields(num_ifields);
- DCHECK_EQ(klass->NumInstanceFields(), num_ifields);
- // Note: We cannot have thread suspension until the field arrays are setup or else
- // Class::VisitFieldRoots may miss some fields.
- self->AllowThreadSuspension();
- // Load methods.
- if (it.NumDirectMethods() != 0) {
- // TODO: append direct methods to class object
- mirror::ObjectArray<mirror::ArtMethod>* directs =
- AllocArtMethodArray(self, it.NumDirectMethods());
- if (UNLIKELY(directs == nullptr)) {
- CHECK(self->IsExceptionPending()); // OOME.
- return;
- }
- klass->SetDirectMethods(directs);
- }
- if (it.NumVirtualMethods() != 0) {
- // TODO: append direct methods to class object
- mirror::ObjectArray<mirror::ArtMethod>* virtuals =
- AllocArtMethodArray(self, it.NumVirtualMethods());
- if (UNLIKELY(virtuals == nullptr)) {
- CHECK(self->IsExceptionPending()); // OOME.
- return;
- }
- klass->SetVirtualMethods(virtuals);
- }
- size_t class_def_method_index = 0;
- uint32_t last_dex_method_index = DexFile::kDexNoIndex;
- size_t last_class_def_method_index = 0;
- for (size_t i = 0; it.HasNextDirectMethod(); i++, it.Next()) {
- self->AllowThreadSuspension();
- StackHandleScope<1> hs(self);
- Handle<mirror::ArtMethod> method(hs.NewHandle(LoadMethod(self, dex_file, it, klass)));
- if (UNLIKELY(method.Get() == nullptr)) {
- CHECK(self->IsExceptionPending()); // OOME.
- return;
+ {
+ // Note: We cannot have thread suspension until the field and method arrays are setup or else
+ // Class::VisitFieldRoots may miss some fields or methods.
+ ScopedAssertNoThreadSuspension nts(self, __FUNCTION__);
+ // Load static fields.
+ ClassDataItemIterator it(dex_file, class_data);
+ const size_t num_sfields = it.NumStaticFields();
+ ArtField* sfields = num_sfields != 0 ? AllocArtFieldArray(self, num_sfields) : nullptr;
+ for (size_t i = 0; it.HasNextStaticField(); i++, it.Next()) {
+ CHECK_LT(i, num_sfields);
+ LoadField(it, klass, &sfields[i]);
+ }
+ klass->SetSFields(sfields);
+ klass->SetNumStaticFields(num_sfields);
+ DCHECK_EQ(klass->NumStaticFields(), num_sfields);
+ // Load instance fields.
+ const size_t num_ifields = it.NumInstanceFields();
+ ArtField* ifields = num_ifields != 0 ? AllocArtFieldArray(self, num_ifields) : nullptr;
+ for (size_t i = 0; it.HasNextInstanceField(); i++, it.Next()) {
+ CHECK_LT(i, num_ifields);
+ LoadField(it, klass, &ifields[i]);
+ }
+ klass->SetIFields(ifields);
+ klass->SetNumInstanceFields(num_ifields);
+ DCHECK_EQ(klass->NumInstanceFields(), num_ifields);
+ // Load methods.
+ if (it.NumDirectMethods() != 0) {
+ klass->SetDirectMethodsPtr(AllocArtMethodArray(self, it.NumDirectMethods()));
+ }
+ klass->SetNumDirectMethods(it.NumDirectMethods());
+ if (it.NumVirtualMethods() != 0) {
+ klass->SetVirtualMethodsPtr(AllocArtMethodArray(self, it.NumVirtualMethods()));
+ }
+ klass->SetNumVirtualMethods(it.NumVirtualMethods());
+ size_t class_def_method_index = 0;
+ uint32_t last_dex_method_index = DexFile::kDexNoIndex;
+ size_t last_class_def_method_index = 0;
+ for (size_t i = 0; it.HasNextDirectMethod(); i++, it.Next()) {
+ ArtMethod* method = klass->GetDirectMethodUnchecked(i, image_pointer_size_);
+ LoadMethod(self, dex_file, it, klass, method);
+ LinkCode(method, oat_class, class_def_method_index);
+ uint32_t it_method_index = it.GetMemberIndex();
+ if (last_dex_method_index == it_method_index) {
+ // duplicate case
+ method->SetMethodIndex(last_class_def_method_index);
+ } else {
+ method->SetMethodIndex(class_def_method_index);
+ last_dex_method_index = it_method_index;
+ last_class_def_method_index = class_def_method_index;
+ }
+ class_def_method_index++;
}
- klass->SetDirectMethod(i, method.Get());
- LinkCode(method, oat_class, class_def_method_index);
- uint32_t it_method_index = it.GetMemberIndex();
- if (last_dex_method_index == it_method_index) {
- // duplicate case
- method->SetMethodIndex(last_class_def_method_index);
- } else {
- method->SetMethodIndex(class_def_method_index);
- last_dex_method_index = it_method_index;
- last_class_def_method_index = class_def_method_index;
+ for (size_t i = 0; it.HasNextVirtualMethod(); i++, it.Next()) {
+ ArtMethod* method = klass->GetVirtualMethodUnchecked(i, image_pointer_size_);
+ LoadMethod(self, dex_file, it, klass, method);
+ DCHECK_EQ(class_def_method_index, it.NumDirectMethods() + i);
+ LinkCode(method, oat_class, class_def_method_index);
+ class_def_method_index++;
}
- class_def_method_index++;
+ DCHECK(!it.HasNext());
}
- for (size_t i = 0; it.HasNextVirtualMethod(); i++, it.Next()) {
- self->AllowThreadSuspension();
- StackHandleScope<1> hs(self);
- Handle<mirror::ArtMethod> method(hs.NewHandle(LoadMethod(self, dex_file, it, klass)));
- if (UNLIKELY(method.Get() == nullptr)) {
- CHECK(self->IsExceptionPending()); // OOME.
- return;
- }
- klass->SetVirtualMethod(i, method.Get());
- DCHECK_EQ(class_def_method_index, it.NumDirectMethods() + i);
- LinkCode(method, oat_class, class_def_method_index);
- class_def_method_index++;
- }
- DCHECK(!it.HasNext());
+ self->AllowThreadSuspension();
}
void ClassLinker::LoadField(const ClassDataItemIterator& it, Handle<mirror::Class> klass,
@@ -2318,20 +2376,12 @@ void ClassLinker::LoadField(const ClassDataItemIterator& it, Handle<mirror::Clas
dst->SetAccessFlags(it.GetFieldAccessFlags());
}
-mirror::ArtMethod* ClassLinker::LoadMethod(Thread* self, const DexFile& dex_file,
- const ClassDataItemIterator& it,
- Handle<mirror::Class> klass) {
+void ClassLinker::LoadMethod(Thread* self, const DexFile& dex_file, const ClassDataItemIterator& it,
+ Handle<mirror::Class> klass, ArtMethod* dst) {
uint32_t dex_method_idx = it.GetMemberIndex();
const DexFile::MethodId& method_id = dex_file.GetMethodId(dex_method_idx);
const char* method_name = dex_file.StringDataByIdx(method_id.name_idx_);
- mirror::ArtMethod* dst = AllocArtMethod(self);
- if (UNLIKELY(dst == nullptr)) {
- CHECK(self->IsExceptionPending()); // OOME.
- return nullptr;
- }
- DCHECK(dst->IsArtMethod()) << PrettyDescriptor(dst->GetClass());
-
ScopedAssertNoThreadSuspension ants(self, "LoadMethod");
dst->SetDexMethodIndex(dex_method_idx);
dst->SetDeclaringClass(klass.Get());
@@ -2377,8 +2427,6 @@ mirror::ArtMethod* ClassLinker::LoadMethod(Thread* self, const DexFile& dex_file
}
}
dst->SetAccessFlags(access_flags);
-
- return dst;
}
void ClassLinker::AppendToBootClassPath(Thread* self, const DexFile& dex_file) {
@@ -2482,17 +2530,17 @@ mirror::DexCache* ClassLinker::FindDexCache(const DexFile& dex_file) {
UNREACHABLE();
}
-void ClassLinker::FixupDexCaches(mirror::ArtMethod* resolution_method) {
+void ClassLinker::FixupDexCaches(ArtMethod* resolution_method) {
ReaderMutexLock mu(Thread::Current(), dex_lock_);
- for (size_t i = 0; i != dex_caches_.size(); ++i) {
- mirror::DexCache* dex_cache = GetDexCache(i);
- dex_cache->Fixup(resolution_method);
+ for (auto& dex_cache : dex_caches_) {
+ dex_cache.Read()->Fixup(resolution_method, image_pointer_size_);
}
}
mirror::Class* ClassLinker::CreatePrimitiveClass(Thread* self, Primitive::Type type) {
- mirror::Class* klass = AllocClass(self, mirror::Class::PrimitiveClassSize());
+ mirror::Class* klass = AllocClass(self, mirror::Class::PrimitiveClassSize(image_pointer_size_));
if (UNLIKELY(klass == nullptr)) {
+ self->AssertPendingOOMException();
return nullptr;
}
return InitializePrimitiveClass(klass, type);
@@ -2593,9 +2641,6 @@ mirror::Class* ClassLinker::CreateArrayClass(Thread* self, const char* descripto
new_class.Assign(GetClassRoot(kObjectArrayClass));
} else if (strcmp(descriptor, GetClassRootDescriptor(kJavaLangStringArrayClass)) == 0) {
new_class.Assign(GetClassRoot(kJavaLangStringArrayClass));
- } else if (strcmp(descriptor,
- GetClassRootDescriptor(kJavaLangReflectArtMethodArrayClass)) == 0) {
- new_class.Assign(GetClassRoot(kJavaLangReflectArtMethodArrayClass));
} else if (strcmp(descriptor, "[C") == 0) {
new_class.Assign(GetClassRoot(kCharArrayClass));
} else if (strcmp(descriptor, "[I") == 0) {
@@ -2605,8 +2650,9 @@ mirror::Class* ClassLinker::CreateArrayClass(Thread* self, const char* descripto
}
}
if (new_class.Get() == nullptr) {
- new_class.Assign(AllocClass(self, mirror::Array::ClassSize()));
+ new_class.Assign(AllocClass(self, mirror::Array::ClassSize(image_pointer_size_)));
if (new_class.Get() == nullptr) {
+ self->AssertPendingOOMException();
return nullptr;
}
new_class->SetComponentType(component_type.Get());
@@ -2620,9 +2666,9 @@ mirror::Class* ClassLinker::CreateArrayClass(Thread* self, const char* descripto
new_class->SetClassLoader(component_type->GetClassLoader());
mirror::Class::SetStatus(new_class, mirror::Class::kStatusLoaded, self);
{
- StackHandleScope<mirror::Class::kImtSize> hs2(self,
- Runtime::Current()->GetImtUnimplementedMethod());
- new_class->PopulateEmbeddedImtAndVTable(&hs2);
+ ArtMethod* imt[mirror::Class::kImtSize];
+ std::fill_n(imt, arraysize(imt), Runtime::Current()->GetImtUnimplementedMethod());
+ new_class->PopulateEmbeddedImtAndVTable(imt, image_pointer_size_);
}
mirror::Class::SetStatus(new_class, mirror::Class::kStatusInitialized, self);
// don't need to set new_class->SetObjectSize(..)
@@ -2732,6 +2778,18 @@ mirror::Class* ClassLinker::InsertClass(const char* descriptor, mirror::Class* k
return nullptr;
}
+void ClassLinker::UpdateClassVirtualMethods(mirror::Class* klass, ArtMethod* new_methods,
+ size_t new_num_methods) {
+ // classlinker_classes_lock_ is used to guard against races between root marking and changing the
+ // direct and virtual method pointers.
+ WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
+ klass->SetNumVirtualMethods(new_num_methods);
+ klass->SetVirtualMethodsPtr(new_methods);
+ if (log_new_class_table_roots_) {
+ new_class_roots_.push_back(GcRoot<mirror::Class>(klass));
+ }
+}
+
mirror::Class* ClassLinker::UpdateClass(const char* descriptor, mirror::Class* klass,
size_t hash) {
WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
@@ -3073,7 +3131,7 @@ void ClassLinker::VerifyClass(Thread* self, Handle<mirror::Class> klass) {
void ClassLinker::EnsurePreverifiedMethods(Handle<mirror::Class> klass) {
if (!klass->IsPreverified()) {
- klass->SetPreverifiedFlagOnAllMethods();
+ klass->SetPreverifiedFlagOnAllMethods(image_pointer_size_);
klass->SetPreverified();
}
}
@@ -3164,15 +3222,15 @@ bool ClassLinker::VerifyClassUsingOatFile(const DexFile& dex_file, mirror::Class
void ClassLinker::ResolveClassExceptionHandlerTypes(const DexFile& dex_file,
Handle<mirror::Class> klass) {
for (size_t i = 0; i < klass->NumDirectMethods(); i++) {
- ResolveMethodExceptionHandlerTypes(dex_file, klass->GetDirectMethod(i));
+ ResolveMethodExceptionHandlerTypes(dex_file, klass->GetDirectMethod(i, image_pointer_size_));
}
for (size_t i = 0; i < klass->NumVirtualMethods(); i++) {
- ResolveMethodExceptionHandlerTypes(dex_file, klass->GetVirtualMethod(i));
+ ResolveMethodExceptionHandlerTypes(dex_file, klass->GetVirtualMethod(i, image_pointer_size_));
}
}
void ClassLinker::ResolveMethodExceptionHandlerTypes(const DexFile& dex_file,
- mirror::ArtMethod* method) {
+ ArtMethod* method) {
// similar to DexVerifier::ScanTryCatchBlocks and dex2oat's ResolveExceptionsForMethod.
const DexFile::CodeItem* code_item = dex_file.GetCodeItem(method->GetCodeItemOffset());
if (code_item == nullptr) {
@@ -3201,10 +3259,6 @@ void ClassLinker::ResolveMethodExceptionHandlerTypes(const DexFile& dex_file,
}
}
-static void CheckProxyConstructor(mirror::ArtMethod* constructor);
-static void CheckProxyMethod(Handle<mirror::ArtMethod> method,
- Handle<mirror::ArtMethod> prototype);
-
mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccessAlreadyRunnable& soa, jstring name,
jobjectArray interfaces, jobject loader,
jobjectArray methods, jobjectArray throws) {
@@ -3255,48 +3309,37 @@ mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccessAlreadyRunnable&
throws_sfield->SetAccessFlags(kAccStatic | kAccPublic | kAccFinal);
// Proxies have 1 direct method, the constructor
- {
- StackHandleScope<2> hs2(self);
- Handle<mirror::ObjectArray<mirror::ArtMethod>> directs =
- hs2.NewHandle(AllocArtMethodArray(self, 1));
- if (UNLIKELY(directs.Get() == nullptr)) {
- CHECK(self->IsExceptionPending()); // OOME.
- return nullptr;
- }
- klass->SetDirectMethods(directs.Get());
- Handle<mirror::ArtMethod> constructor =
- hs2.NewHandle(CreateProxyConstructor(self, klass));
- if (UNLIKELY(constructor.Get() == nullptr)) {
- CHECK(self->IsExceptionPending()); // OOME.
- return nullptr;
- }
- klass->SetDirectMethod(0, constructor.Get());
+ auto* directs = AllocArtMethodArray(self, 1);
+ // Currently AllocArtMethodArray cannot return null, but the OOM logic is left there in case we
+ // want to throw OOM in the future.
+ if (UNLIKELY(directs == nullptr)) {
+ self->AssertPendingOOMException();
+ return nullptr;
}
+ klass->SetDirectMethodsPtr(directs);
+ klass->SetNumDirectMethods(1u);
+ CreateProxyConstructor(klass, klass->GetDirectMethodUnchecked(0, image_pointer_size_));
// Create virtual method using specified prototypes.
auto h_methods = hs.NewHandle(soa.Decode<mirror::ObjectArray<mirror::Method>*>(methods));
DCHECK_EQ(h_methods->GetClass(), mirror::Method::ArrayClass())
<< PrettyClass(h_methods->GetClass());
const size_t num_virtual_methods = h_methods->GetLength();
- {
- StackHandleScope<1> hs2(self);
- Handle<mirror::ObjectArray<mirror::ArtMethod>> virtuals =
- hs2.NewHandle(AllocArtMethodArray(self, num_virtual_methods));
- if (UNLIKELY(virtuals.Get() == nullptr)) {
- CHECK(self->IsExceptionPending()); // OOME.
- return nullptr;
- }
- klass->SetVirtualMethods(virtuals.Get());
+ auto* virtuals = AllocArtMethodArray(self, num_virtual_methods);
+ // Currently AllocArtMethodArray cannot return null, but the OOM logic is left there in case we
+ // want to throw OOM in the future.
+ if (UNLIKELY(virtuals == nullptr)) {
+ self->AssertPendingOOMException();
+ return nullptr;
}
+ klass->SetVirtualMethodsPtr(virtuals);
+ klass->SetNumVirtualMethods(num_virtual_methods);
for (size_t i = 0; i < num_virtual_methods; ++i) {
- StackHandleScope<2> hs2(self);
- Handle<mirror::ArtMethod> prototype(hs2.NewHandle(h_methods->Get(i)->GetArtMethod()));
- Handle<mirror::ArtMethod> clone(hs2.NewHandle(CreateProxyMethod(self, klass, prototype)));
- if (UNLIKELY(clone.Get() == nullptr)) {
- CHECK(self->IsExceptionPending()); // OOME.
- return nullptr;
- }
- klass->SetVirtualMethod(i, clone.Get());
+ auto* virtual_method = klass->GetVirtualMethodUnchecked(i, image_pointer_size_);
+ auto* prototype = h_methods->Get(i)->GetArtMethod();
+ CreateProxyMethod(klass, prototype, virtual_method);
+ DCHECK(virtual_method->GetDeclaringClass() != nullptr);
+ DCHECK(prototype->GetDeclaringClass() != nullptr);
}
// The super class is java.lang.reflect.Proxy
@@ -3311,7 +3354,7 @@ mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccessAlreadyRunnable&
ObjectLock<mirror::Class> resolution_lock(self, klass);
// Link the fields and virtual methods, creating vtable and iftables.
// The new class will replace the old one in the class table.
- Handle<mirror::ObjectArray<mirror::Class> > h_interfaces(
+ Handle<mirror::ObjectArray<mirror::Class>> h_interfaces(
hs.NewHandle(soa.Decode<mirror::ObjectArray<mirror::Class>*>(interfaces)));
if (!LinkClass(self, descriptor.c_str(), klass, h_interfaces, &new_class)) {
mirror::Class::SetStatus(klass, mirror::Class::kStatusError, self);
@@ -3338,11 +3381,11 @@ mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccessAlreadyRunnable&
// sanity checks
if (kIsDebugBuild) {
CHECK(klass->GetIFields() == nullptr);
- CheckProxyConstructor(klass->GetDirectMethod(0));
+ CheckProxyConstructor(klass->GetDirectMethod(0, image_pointer_size_));
+
for (size_t i = 0; i < num_virtual_methods; ++i) {
- StackHandleScope<2> hs2(self);
- Handle<mirror::ArtMethod> prototype(hs2.NewHandle(h_methods->Get(i)->GetArtMethod()));
- Handle<mirror::ArtMethod> virtual_method(hs2.NewHandle(klass->GetVirtualMethod(i)));
+ auto* virtual_method = klass->GetVirtualMethodUnchecked(i, image_pointer_size_);
+ auto* prototype = h_methods->Get(i++)->GetArtMethod();
CheckProxyMethod(virtual_method, prototype);
}
@@ -3371,8 +3414,8 @@ std::string ClassLinker::GetDescriptorForProxy(mirror::Class* proxy_class) {
return DotToDescriptor(name->ToModifiedUtf8().c_str());
}
-mirror::ArtMethod* ClassLinker::FindMethodForProxy(mirror::Class* proxy_class,
- mirror::ArtMethod* proxy_method) {
+ArtMethod* ClassLinker::FindMethodForProxy(mirror::Class* proxy_class,
+ ArtMethod* proxy_method) {
DCHECK(proxy_class->IsProxyClass());
DCHECK(proxy_method->IsProxyMethod());
{
@@ -3381,8 +3424,8 @@ mirror::ArtMethod* ClassLinker::FindMethodForProxy(mirror::Class* proxy_class,
for (const GcRoot<mirror::DexCache>& root : dex_caches_) {
auto* dex_cache = root.Read();
if (proxy_method->HasSameDexCacheResolvedTypes(dex_cache->GetResolvedTypes())) {
- mirror::ArtMethod* resolved_method = dex_cache->GetResolvedMethod(
- proxy_method->GetDexMethodIndex());
+ ArtMethod* resolved_method = dex_cache->GetResolvedMethod(
+ proxy_method->GetDexMethodIndex(), image_pointer_size_);
CHECK(resolved_method != nullptr);
return resolved_method;
}
@@ -3393,74 +3436,60 @@ mirror::ArtMethod* ClassLinker::FindMethodForProxy(mirror::Class* proxy_class,
UNREACHABLE();
}
-
-mirror::ArtMethod* ClassLinker::CreateProxyConstructor(Thread* self,
- Handle<mirror::Class> klass) {
- // Create constructor for Proxy that must initialize h
- mirror::ObjectArray<mirror::ArtMethod>* proxy_direct_methods =
- GetClassRoot(kJavaLangReflectProxy)->GetDirectMethods();
- CHECK_EQ(proxy_direct_methods->GetLength(), 16);
- mirror::ArtMethod* proxy_constructor = proxy_direct_methods->Get(2);
+void ClassLinker::CreateProxyConstructor(Handle<mirror::Class> klass, ArtMethod* out) {
+ // Create constructor for Proxy that must initialize the method.
+ CHECK_EQ(GetClassRoot(kJavaLangReflectProxy)->NumDirectMethods(), 16u);
+ ArtMethod* proxy_constructor = GetClassRoot(kJavaLangReflectProxy)->GetDirectMethodUnchecked(
+ 2, image_pointer_size_);
// Ensure constructor is in dex cache so that we can use the dex cache to look up the overridden
// constructor method.
GetClassRoot(kJavaLangReflectProxy)->GetDexCache()->SetResolvedMethod(
- proxy_constructor->GetDexMethodIndex(), proxy_constructor);
+ proxy_constructor->GetDexMethodIndex(), proxy_constructor, image_pointer_size_);
// Clone the existing constructor of Proxy (our constructor would just invoke it so steal its
// code_ too)
- mirror::ArtMethod* constructor = down_cast<mirror::ArtMethod*>(proxy_constructor->Clone(self));
- if (constructor == nullptr) {
- CHECK(self->IsExceptionPending()); // OOME.
- return nullptr;
- }
+ DCHECK(out != nullptr);
+ out->CopyFrom(proxy_constructor, image_pointer_size_);
// Make this constructor public and fix the class to be our Proxy version
- constructor->SetAccessFlags((constructor->GetAccessFlags() & ~kAccProtected) | kAccPublic);
- constructor->SetDeclaringClass(klass.Get());
- return constructor;
+ out->SetAccessFlags((out->GetAccessFlags() & ~kAccProtected) | kAccPublic);
+ out->SetDeclaringClass(klass.Get());
}
-static void CheckProxyConstructor(mirror::ArtMethod* constructor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+void ClassLinker::CheckProxyConstructor(ArtMethod* constructor) const {
CHECK(constructor->IsConstructor());
- CHECK_STREQ(constructor->GetName(), "<init>");
- CHECK_STREQ(constructor->GetSignature().ToString().c_str(),
- "(Ljava/lang/reflect/InvocationHandler;)V");
+ auto* np = constructor->GetInterfaceMethodIfProxy(image_pointer_size_);
+ CHECK_STREQ(np->GetName(), "<init>");
+ CHECK_STREQ(np->GetSignature().ToString().c_str(), "(Ljava/lang/reflect/InvocationHandler;)V");
DCHECK(constructor->IsPublic());
}
-mirror::ArtMethod* ClassLinker::CreateProxyMethod(Thread* self,
- Handle<mirror::Class> klass,
- Handle<mirror::ArtMethod> prototype) {
+void ClassLinker::CreateProxyMethod(Handle<mirror::Class> klass, ArtMethod* prototype,
+ ArtMethod* out) {
// Ensure prototype is in dex cache so that we can use the dex cache to look up the overridden
// prototype method
auto* dex_cache = prototype->GetDeclaringClass()->GetDexCache();
// Avoid dirtying the dex cache unless we need to.
- if (dex_cache->GetResolvedMethod(prototype->GetDexMethodIndex()) != prototype.Get()) {
- dex_cache->SetResolvedMethod(prototype->GetDexMethodIndex(), prototype.Get());
+ if (dex_cache->GetResolvedMethod(prototype->GetDexMethodIndex(), image_pointer_size_) !=
+ prototype) {
+ dex_cache->SetResolvedMethod(
+ prototype->GetDexMethodIndex(), prototype, image_pointer_size_);
}
// We steal everything from the prototype (such as DexCache, invoke stub, etc.) then specialize
// as necessary
- mirror::ArtMethod* method = down_cast<mirror::ArtMethod*>(prototype->Clone(self));
- if (UNLIKELY(method == nullptr)) {
- CHECK(self->IsExceptionPending()); // OOME.
- return nullptr;
- }
+ DCHECK(out != nullptr);
+ out->CopyFrom(prototype, image_pointer_size_);
// Set class to be the concrete proxy class and clear the abstract flag, modify exceptions to
// the intersection of throw exceptions as defined in Proxy
- method->SetDeclaringClass(klass.Get());
- method->SetAccessFlags((method->GetAccessFlags() & ~kAccAbstract) | kAccFinal);
+ out->SetDeclaringClass(klass.Get());
+ out->SetAccessFlags((out->GetAccessFlags() & ~kAccAbstract) | kAccFinal);
// At runtime the method looks like a reference and argument saving method, clone the code
// related parameters from this method.
- method->SetEntryPointFromQuickCompiledCode(GetQuickProxyInvokeHandler());
- method->SetEntryPointFromInterpreter(artInterpreterToCompiledCodeBridge);
-
- return method;
+ out->SetEntryPointFromQuickCompiledCode(GetQuickProxyInvokeHandler());
+ out->SetEntryPointFromInterpreter(artInterpreterToCompiledCodeBridge);
}
-static void CheckProxyMethod(Handle<mirror::ArtMethod> method,
- Handle<mirror::ArtMethod> prototype)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+void ClassLinker::CheckProxyMethod(ArtMethod* method, ArtMethod* prototype) const {
// Basic sanity
CHECK(!prototype->IsFinal());
CHECK(method->IsFinal());
@@ -3468,26 +3497,26 @@ static void CheckProxyMethod(Handle<mirror::ArtMethod> method,
// The proxy method doesn't have its own dex cache or dex file and so it steals those of its
// interface prototype. The exception to this are Constructors and the Class of the Proxy itself.
- CHECK(prototype->HasSameDexCacheResolvedMethods(method.Get()));
- CHECK(prototype->HasSameDexCacheResolvedTypes(method.Get()));
- CHECK_EQ(prototype->GetDeclaringClass()->GetDexCache(), method->GetDexCache());
+ CHECK(prototype->HasSameDexCacheResolvedMethods(method));
+ CHECK(prototype->HasSameDexCacheResolvedTypes(method));
+ auto* np = method->GetInterfaceMethodIfProxy(image_pointer_size_);
+ CHECK_EQ(prototype->GetDeclaringClass()->GetDexCache(), np->GetDexCache());
CHECK_EQ(prototype->GetDexMethodIndex(), method->GetDexMethodIndex());
- CHECK_STREQ(method->GetName(), prototype->GetName());
- CHECK_STREQ(method->GetShorty(), prototype->GetShorty());
+ CHECK_STREQ(np->GetName(), prototype->GetName());
+ CHECK_STREQ(np->GetShorty(), prototype->GetShorty());
// More complex sanity - via dex cache
- CHECK_EQ(method->GetInterfaceMethodIfProxy()->GetReturnType(), prototype->GetReturnType());
+ CHECK_EQ(np->GetReturnType(), prototype->GetReturnType());
}
-static bool CanWeInitializeClass(mirror::Class* klass, bool can_init_statics,
- bool can_init_parents)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+bool ClassLinker::CanWeInitializeClass(mirror::Class* klass, bool can_init_statics,
+ bool can_init_parents) {
if (can_init_statics && can_init_parents) {
return true;
}
if (!can_init_statics) {
// Check if there's a class initializer.
- mirror::ArtMethod* clinit = klass->FindClassInitializer();
+ ArtMethod* clinit = klass->FindClassInitializer(image_pointer_size_);
if (clinit != nullptr) {
return false;
}
@@ -3500,17 +3529,14 @@ static bool CanWeInitializeClass(mirror::Class* klass, bool can_init_statics,
}
}
}
- if (!klass->IsInterface() && klass->HasSuperClass()) {
- mirror::Class* super_class = klass->GetSuperClass();
- if (!can_init_parents && !super_class->IsInitialized()) {
- return false;
- } else {
- if (!CanWeInitializeClass(super_class, can_init_statics, can_init_parents)) {
- return false;
- }
- }
+ if (klass->IsInterface() || !klass->HasSuperClass()) {
+ return true;
}
- return true;
+ mirror::Class* super_class = klass->GetSuperClass();
+ if (!can_init_parents && !super_class->IsInitialized()) {
+ return false;
+ }
+ return CanWeInitializeClass(super_class, can_init_statics, can_init_parents);
}
bool ClassLinker::InitializeClass(Thread* self, Handle<mirror::Class> klass,
@@ -3670,7 +3696,7 @@ bool ClassLinker::InitializeClass(Thread* self, Handle<mirror::Class> klass,
}
}
- mirror::ArtMethod* clinit = klass->FindClassInitializer();
+ ArtMethod* clinit = klass->FindClassInitializer(image_pointer_size_);
if (clinit != nullptr) {
CHECK(can_init_statics);
JValue result;
@@ -3761,8 +3787,8 @@ bool ClassLinker::WaitForInitializeClass(Handle<mirror::Class> klass, Thread* se
static void ThrowSignatureCheckResolveReturnTypeException(Handle<mirror::Class> klass,
Handle<mirror::Class> super_klass,
- Handle<mirror::ArtMethod> method,
- mirror::ArtMethod* m)
+ ArtMethod* method,
+ ArtMethod* m)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(Thread::Current()->IsExceptionPending());
DCHECK(!m->IsProxyMethod());
@@ -3776,7 +3802,7 @@ static void ThrowSignatureCheckResolveReturnTypeException(Handle<mirror::Class>
"While checking class %s method %s signature against %s %s: "
"Failed to resolve return type %s with %s",
PrettyDescriptor(klass.Get()).c_str(),
- PrettyMethod(method.Get()).c_str(),
+ PrettyMethod(method).c_str(),
super_klass->IsInterface() ? "interface" : "superclass",
PrettyDescriptor(super_klass.Get()).c_str(),
return_type.c_str(), class_loader.c_str());
@@ -3784,8 +3810,8 @@ static void ThrowSignatureCheckResolveReturnTypeException(Handle<mirror::Class>
static void ThrowSignatureCheckResolveArgException(Handle<mirror::Class> klass,
Handle<mirror::Class> super_klass,
- Handle<mirror::ArtMethod> method,
- mirror::ArtMethod* m,
+ ArtMethod* method,
+ ArtMethod* m,
uint32_t index, uint32_t arg_type_idx)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(Thread::Current()->IsExceptionPending());
@@ -3797,7 +3823,7 @@ static void ThrowSignatureCheckResolveArgException(Handle<mirror::Class> klass,
"While checking class %s method %s signature against %s %s: "
"Failed to resolve arg %u type %s with %s",
PrettyDescriptor(klass.Get()).c_str(),
- PrettyMethod(method.Get()).c_str(),
+ PrettyMethod(method).c_str(),
super_klass->IsInterface() ? "interface" : "superclass",
PrettyDescriptor(super_klass.Get()).c_str(),
index, arg_type.c_str(), class_loader.c_str());
@@ -3805,13 +3831,13 @@ static void ThrowSignatureCheckResolveArgException(Handle<mirror::Class> klass,
static void ThrowSignatureMismatch(Handle<mirror::Class> klass,
Handle<mirror::Class> super_klass,
- Handle<mirror::ArtMethod> method,
+ ArtMethod* method,
const std::string& error_msg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ThrowLinkageError(klass.Get(),
"Class %s method %s resolves differently in %s %s: %s",
PrettyDescriptor(klass.Get()).c_str(),
- PrettyMethod(method.Get()).c_str(),
+ PrettyMethod(method).c_str(),
super_klass->IsInterface() ? "interface" : "superclass",
PrettyDescriptor(super_klass.Get()).c_str(),
error_msg.c_str());
@@ -3820,19 +3846,19 @@ static void ThrowSignatureMismatch(Handle<mirror::Class> klass,
static bool HasSameSignatureWithDifferentClassLoaders(Thread* self,
Handle<mirror::Class> klass,
Handle<mirror::Class> super_klass,
- Handle<mirror::ArtMethod> method1,
- Handle<mirror::ArtMethod> method2)
+ ArtMethod* method1,
+ ArtMethod* method2)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
{
StackHandleScope<1> hs(self);
Handle<mirror::Class> return_type(hs.NewHandle(method1->GetReturnType()));
if (UNLIKELY(return_type.Get() == nullptr)) {
- ThrowSignatureCheckResolveReturnTypeException(klass, super_klass, method1, method1.Get());
+ ThrowSignatureCheckResolveReturnTypeException(klass, super_klass, method1, method1);
return false;
}
mirror::Class* other_return_type = method2->GetReturnType();
if (UNLIKELY(other_return_type == nullptr)) {
- ThrowSignatureCheckResolveReturnTypeException(klass, super_klass, method1, method2.Get());
+ ThrowSignatureCheckResolveReturnTypeException(klass, super_klass, method1, method2);
return false;
}
if (UNLIKELY(other_return_type != return_type.Get())) {
@@ -3851,7 +3877,7 @@ static bool HasSameSignatureWithDifferentClassLoaders(Thread* self,
if (types2 != nullptr && types2->Size() != 0) {
ThrowSignatureMismatch(klass, super_klass, method1,
StringPrintf("Type list mismatch with %s",
- PrettyMethod(method2.Get(), true).c_str()));
+ PrettyMethod(method2, true).c_str()));
return false;
}
return true;
@@ -3859,7 +3885,7 @@ static bool HasSameSignatureWithDifferentClassLoaders(Thread* self,
if (types1->Size() != 0) {
ThrowSignatureMismatch(klass, super_klass, method1,
StringPrintf("Type list mismatch with %s",
- PrettyMethod(method2.Get(), true).c_str()));
+ PrettyMethod(method2, true).c_str()));
return false;
}
return true;
@@ -3868,7 +3894,7 @@ static bool HasSameSignatureWithDifferentClassLoaders(Thread* self,
if (UNLIKELY(num_types != types2->Size())) {
ThrowSignatureMismatch(klass, super_klass, method1,
StringPrintf("Type list mismatch with %s",
- PrettyMethod(method2.Get(), true).c_str()));
+ PrettyMethod(method2, true).c_str()));
return false;
}
for (uint32_t i = 0; i < num_types; ++i) {
@@ -3878,7 +3904,7 @@ static bool HasSameSignatureWithDifferentClassLoaders(Thread* self,
method1->GetClassFromTypeIndex(param_type_idx, true)));
if (UNLIKELY(param_type.Get() == nullptr)) {
ThrowSignatureCheckResolveArgException(klass, super_klass, method1,
- method1.Get(), i, param_type_idx);
+ method1, i, param_type_idx);
return false;
}
uint32_t other_param_type_idx = types2->GetTypeItem(i).type_idx_;
@@ -3886,7 +3912,7 @@ static bool HasSameSignatureWithDifferentClassLoaders(Thread* self,
method2->GetClassFromTypeIndex(other_param_type_idx, true);
if (UNLIKELY(other_param_type == nullptr)) {
ThrowSignatureCheckResolveArgException(klass, super_klass, method1,
- method2.Get(), i, other_param_type_idx);
+ method2, i, other_param_type_idx);
return false;
}
if (UNLIKELY(param_type.Get() != other_param_type)) {
@@ -3910,19 +3936,17 @@ bool ClassLinker::ValidateSuperClassDescriptors(Handle<mirror::Class> klass) {
}
// Begin with the methods local to the superclass.
Thread* self = Thread::Current();
- StackHandleScope<3> hs(self);
+ StackHandleScope<1> hs(self);
MutableHandle<mirror::Class> super_klass(hs.NewHandle<mirror::Class>(nullptr));
- MutableHandle<mirror::ArtMethod> h_m(hs.NewHandle<mirror::ArtMethod>(nullptr));
- MutableHandle<mirror::ArtMethod> super_h_m(hs.NewHandle<mirror::ArtMethod>(nullptr));
if (klass->HasSuperClass() &&
klass->GetClassLoader() != klass->GetSuperClass()->GetClassLoader()) {
super_klass.Assign(klass->GetSuperClass());
for (int i = klass->GetSuperClass()->GetVTableLength() - 1; i >= 0; --i) {
- h_m.Assign(klass->GetVTableEntry(i));
- super_h_m.Assign(klass->GetSuperClass()->GetVTableEntry(i));
- if (h_m.Get() != super_h_m.Get()) {
+ auto* m = klass->GetVTableEntry(i, image_pointer_size_);
+ auto* super_m = klass->GetSuperClass()->GetVTableEntry(i, image_pointer_size_);
+ if (m != super_m) {
if (UNLIKELY(!HasSameSignatureWithDifferentClassLoaders(self, klass, super_klass,
- h_m, super_h_m))) {
+ m, super_m))) {
self->AssertPendingException();
return false;
}
@@ -3934,11 +3958,12 @@ bool ClassLinker::ValidateSuperClassDescriptors(Handle<mirror::Class> klass) {
if (klass->GetClassLoader() != super_klass->GetClassLoader()) {
uint32_t num_methods = super_klass->NumVirtualMethods();
for (uint32_t j = 0; j < num_methods; ++j) {
- h_m.Assign(klass->GetIfTable()->GetMethodArray(i)->GetWithoutChecks(j));
- super_h_m.Assign(super_klass->GetVirtualMethod(j));
- if (h_m.Get() != super_h_m.Get()) {
+ auto* m = klass->GetIfTable()->GetMethodArray(i)->GetElementPtrSize<ArtMethod*>(
+ j, image_pointer_size_);
+ auto* super_m = super_klass->GetVirtualMethod(j, image_pointer_size_);
+ if (m != super_m) {
if (UNLIKELY(!HasSameSignatureWithDifferentClassLoaders(self, klass, super_klass,
- h_m, super_h_m))) {
+ m, super_m))) {
self->AssertPendingException();
return false;
}
@@ -3967,8 +3992,10 @@ bool ClassLinker::EnsureInitialized(Thread* self, Handle<mirror::Class> c, bool
return success;
}
-void ClassLinker::FixupTemporaryDeclaringClass(mirror::Class* temp_class, mirror::Class* new_class) {
+void ClassLinker::FixupTemporaryDeclaringClass(mirror::Class* temp_class,
+ mirror::Class* new_class) {
ArtField* fields = new_class->GetIFields();
+ DCHECK_EQ(temp_class->NumInstanceFields(), new_class->NumInstanceFields());
for (size_t i = 0, count = new_class->NumInstanceFields(); i < count; i++) {
if (fields[i].GetDeclaringClass() == temp_class) {
fields[i].SetDeclaringClass(new_class);
@@ -3976,27 +4003,24 @@ void ClassLinker::FixupTemporaryDeclaringClass(mirror::Class* temp_class, mirror
}
fields = new_class->GetSFields();
+ DCHECK_EQ(temp_class->NumStaticFields(), new_class->NumStaticFields());
for (size_t i = 0, count = new_class->NumStaticFields(); i < count; i++) {
if (fields[i].GetDeclaringClass() == temp_class) {
fields[i].SetDeclaringClass(new_class);
}
}
- mirror::ObjectArray<mirror::ArtMethod>* methods = new_class->GetDirectMethods();
- if (methods != nullptr) {
- for (int index = 0; index < methods->GetLength(); index ++) {
- if (methods->Get(index)->GetDeclaringClass() == temp_class) {
- methods->Get(index)->SetDeclaringClass(new_class);
- }
+ DCHECK_EQ(temp_class->NumDirectMethods(), new_class->NumDirectMethods());
+ for (auto& method : new_class->GetDirectMethods(image_pointer_size_)) {
+ if (method.GetDeclaringClass() == temp_class) {
+ method.SetDeclaringClass(new_class);
}
}
- methods = new_class->GetVirtualMethods();
- if (methods != nullptr) {
- for (int index = 0; index < methods->GetLength(); index ++) {
- if (methods->Get(index)->GetDeclaringClass() == temp_class) {
- methods->Get(index)->SetDeclaringClass(new_class);
- }
+ DCHECK_EQ(temp_class->NumVirtualMethods(), new_class->NumVirtualMethods());
+ for (auto& method : new_class->GetVirtualMethods(image_pointer_size_)) {
+ if (method.GetDeclaringClass() == temp_class) {
+ method.SetDeclaringClass(new_class);
}
}
}
@@ -4009,9 +4033,9 @@ bool ClassLinker::LinkClass(Thread* self, const char* descriptor, Handle<mirror:
if (!LinkSuperClass(klass)) {
return false;
}
- StackHandleScope<mirror::Class::kImtSize> imt_handle_scope(
- self, Runtime::Current()->GetImtUnimplementedMethod());
- if (!LinkMethods(self, klass, interfaces, &imt_handle_scope)) {
+ ArtMethod* imt[mirror::Class::kImtSize];
+ std::fill_n(imt, arraysize(imt), Runtime::Current()->GetImtUnimplementedMethod());
+ if (!LinkMethods(self, klass, interfaces, imt)) {
return false;
}
if (!LinkInstanceFields(self, klass)) {
@@ -4030,7 +4054,7 @@ bool ClassLinker::LinkClass(Thread* self, const char* descriptor, Handle<mirror:
CHECK_EQ(klass->GetClassSize(), class_size) << PrettyDescriptor(klass.Get());
if (klass->ShouldHaveEmbeddedImtAndVTable()) {
- klass->PopulateEmbeddedImtAndVTable(&imt_handle_scope);
+ klass->PopulateEmbeddedImtAndVTable(imt, image_pointer_size_);
}
// This will notify waiters on klass that saw the not yet resolved
@@ -4041,10 +4065,9 @@ bool ClassLinker::LinkClass(Thread* self, const char* descriptor, Handle<mirror:
CHECK(!klass->IsResolved());
// Retire the temporary class and create the correctly sized resolved class.
StackHandleScope<1> hs(self);
- auto h_new_class = hs.NewHandle<mirror::Class>(
- klass->CopyOf(self, class_size, &imt_handle_scope));
+ auto h_new_class = hs.NewHandle(klass->CopyOf(self, class_size, imt, image_pointer_size_));
if (UNLIKELY(h_new_class.Get() == nullptr)) {
- CHECK(self->IsExceptionPending()); // Expect an OOME.
+ self->AssertPendingOOMException();
mirror::Class::SetStatus(klass, mirror::Class::kStatusError, self);
return false;
}
@@ -4356,7 +4379,7 @@ bool ClassLinker::LinkSuperClass(Handle<mirror::Class> klass) {
// Populate the class vtable and itable. Compute return type indices.
bool ClassLinker::LinkMethods(Thread* self, Handle<mirror::Class> klass,
Handle<mirror::ObjectArray<mirror::Class>> interfaces,
- StackHandleScope<mirror::Class::kImtSize>* out_imt) {
+ ArtMethod** out_imt) {
self->AllowThreadSuspension();
if (klass->IsInterface()) {
// No vtable.
@@ -4366,7 +4389,7 @@ bool ClassLinker::LinkMethods(Thread* self, Handle<mirror::Class> klass,
return false;
}
for (size_t i = 0; i < count; ++i) {
- klass->GetVirtualMethodDuringLinking(i)->SetMethodIndex(i);
+ klass->GetVirtualMethodDuringLinking(i, image_pointer_size_)->SetMethodIndex(i);
}
} else if (!LinkVirtualMethods(self, klass)) { // Link virtual methods first.
return false;
@@ -4379,7 +4402,7 @@ bool ClassLinker::LinkMethods(Thread* self, Handle<mirror::Class> klass,
// caches in the implementation below.
class MethodNameAndSignatureComparator FINAL : public ValueObject {
public:
- explicit MethodNameAndSignatureComparator(mirror::ArtMethod* method)
+ explicit MethodNameAndSignatureComparator(ArtMethod* method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
dex_file_(method->GetDexFile()), mid_(&dex_file_->GetMethodId(method->GetDexMethodIndex())),
name_(nullptr), name_len_(0) {
@@ -4393,7 +4416,7 @@ class MethodNameAndSignatureComparator FINAL : public ValueObject {
return name_;
}
- bool HasSameNameAndSignature(mirror::ArtMethod* other)
+ bool HasSameNameAndSignature(ArtMethod* other)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(!other->IsProxyMethod()) << PrettyMethod(other);
const DexFile* other_dex_file = other->GetDexFile();
@@ -4424,13 +4447,16 @@ class MethodNameAndSignatureComparator FINAL : public ValueObject {
class LinkVirtualHashTable {
public:
- LinkVirtualHashTable(Handle<mirror::Class> klass, size_t hash_size, uint32_t* hash_table)
- : klass_(klass), hash_size_(hash_size), hash_table_(hash_table) {
+ LinkVirtualHashTable(Handle<mirror::Class> klass, size_t hash_size, uint32_t* hash_table,
+ size_t image_pointer_size)
+ : klass_(klass), hash_size_(hash_size), hash_table_(hash_table),
+ image_pointer_size_(image_pointer_size) {
std::fill(hash_table_, hash_table_ + hash_size_, invalid_index_);
}
void Add(uint32_t virtual_method_index) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod* local_method = klass_->GetVirtualMethodDuringLinking(virtual_method_index);
- const char* name = local_method->GetName();
+ ArtMethod* local_method = klass_->GetVirtualMethodDuringLinking(
+ virtual_method_index, image_pointer_size_);
+ const char* name = local_method->GetInterfaceMethodIfProxy(image_pointer_size_)->GetName();
uint32_t hash = ComputeModifiedUtf8Hash(name);
uint32_t index = hash % hash_size_;
// Linear probe until we have an empty slot.
@@ -4454,9 +4480,10 @@ class LinkVirtualHashTable {
break;
}
if (value != removed_index_) { // This signifies not already overriden.
- mirror::ArtMethod* virtual_method =
- klass_->GetVirtualMethodDuringLinking(value);
- if (comparator->HasSameNameAndSignature(virtual_method->GetInterfaceMethodIfProxy())) {
+ ArtMethod* virtual_method =
+ klass_->GetVirtualMethodDuringLinking(value, image_pointer_size_);
+ if (comparator->HasSameNameAndSignature(
+ virtual_method->GetInterfaceMethodIfProxy(image_pointer_size_))) {
hash_table_[index] = removed_index_;
return value;
}
@@ -4478,6 +4505,7 @@ class LinkVirtualHashTable {
Handle<mirror::Class> klass_;
const size_t hash_size_;
uint32_t* const hash_table_;
+ const size_t image_pointer_size_;
};
const uint32_t LinkVirtualHashTable::invalid_index_ = std::numeric_limits<uint32_t>::max();
@@ -4490,30 +4518,32 @@ bool ClassLinker::LinkVirtualMethods(Thread* self, Handle<mirror::Class> klass)
const size_t max_count = num_virtual_methods + super_vtable_length;
StackHandleScope<2> hs(self);
Handle<mirror::Class> super_class(hs.NewHandle(klass->GetSuperClass()));
- MutableHandle<mirror::ObjectArray<mirror::ArtMethod>> vtable;
+ MutableHandle<mirror::PointerArray> vtable;
if (super_class->ShouldHaveEmbeddedImtAndVTable()) {
- vtable = hs.NewHandle(AllocArtMethodArray(self, max_count));
+ vtable = hs.NewHandle(AllocPointerArray(self, max_count));
if (UNLIKELY(vtable.Get() == nullptr)) {
- CHECK(self->IsExceptionPending()); // OOME.
+ self->AssertPendingOOMException();
return false;
}
for (size_t i = 0; i < super_vtable_length; i++) {
- vtable->SetWithoutChecks<false>(i, super_class->GetEmbeddedVTableEntry(i));
+ vtable->SetElementPtrSize(
+ i, super_class->GetEmbeddedVTableEntry(i, image_pointer_size_), image_pointer_size_);
}
if (num_virtual_methods == 0) {
klass->SetVTable(vtable.Get());
return true;
}
} else {
- mirror::ObjectArray<mirror::ArtMethod>* super_vtable = super_class->GetVTable();
+ auto* super_vtable = super_class->GetVTable();
CHECK(super_vtable != nullptr) << PrettyClass(super_class.Get());
if (num_virtual_methods == 0) {
klass->SetVTable(super_vtable);
return true;
}
- vtable = hs.NewHandle(super_vtable->CopyOf(self, max_count));
+ vtable = hs.NewHandle(down_cast<mirror::PointerArray*>(
+ super_vtable->CopyOf(self, max_count)));
if (UNLIKELY(vtable.Get() == nullptr)) {
- CHECK(self->IsExceptionPending()); // OOME.
+ self->AssertPendingOOMException();
return false;
}
}
@@ -4537,21 +4567,24 @@ bool ClassLinker::LinkVirtualMethods(Thread* self, Handle<mirror::Class> klass)
hash_heap_storage.reset(new uint32_t[hash_table_size]);
hash_table_ptr = hash_heap_storage.get();
}
- LinkVirtualHashTable hash_table(klass, hash_table_size, hash_table_ptr);
+ LinkVirtualHashTable hash_table(klass, hash_table_size, hash_table_ptr, image_pointer_size_);
// Add virtual methods to the hash table.
for (size_t i = 0; i < num_virtual_methods; ++i) {
+ DCHECK(klass->GetVirtualMethodDuringLinking(
+ i, image_pointer_size_)->GetDeclaringClass() != nullptr);
hash_table.Add(i);
}
// Loop through each super vtable method and see if they are overriden by a method we added to
// the hash table.
for (size_t j = 0; j < super_vtable_length; ++j) {
// Search the hash table to see if we are overidden by any method.
- mirror::ArtMethod* super_method = vtable->GetWithoutChecks(j);
+ ArtMethod* super_method = vtable->GetElementPtrSize<ArtMethod*>(j, image_pointer_size_);
MethodNameAndSignatureComparator super_method_name_comparator(
- super_method->GetInterfaceMethodIfProxy());
+ super_method->GetInterfaceMethodIfProxy(image_pointer_size_));
uint32_t hash_index = hash_table.FindAndRemove(&super_method_name_comparator);
if (hash_index != hash_table.GetNotFoundIndex()) {
- mirror::ArtMethod* virtual_method = klass->GetVirtualMethodDuringLinking(hash_index);
+ ArtMethod* virtual_method = klass->GetVirtualMethodDuringLinking(
+ hash_index, image_pointer_size_);
if (klass->CanAccessMember(super_method->GetDeclaringClass(),
super_method->GetAccessFlags())) {
if (super_method->IsFinal()) {
@@ -4560,7 +4593,7 @@ bool ClassLinker::LinkVirtualMethods(Thread* self, Handle<mirror::Class> klass)
super_method->GetDeclaringClassDescriptor());
return false;
}
- vtable->SetWithoutChecks<false>(j, virtual_method);
+ vtable->SetElementPtrSize(j, virtual_method, image_pointer_size_);
virtual_method->SetMethodIndex(j);
} else {
LOG(WARNING) << "Before Android 4.1, method " << PrettyMethod(virtual_method)
@@ -4572,13 +4605,13 @@ bool ClassLinker::LinkVirtualMethods(Thread* self, Handle<mirror::Class> klass)
// Add the non overridden methods at the end.
size_t actual_count = super_vtable_length;
for (size_t i = 0; i < num_virtual_methods; ++i) {
- mirror::ArtMethod* local_method = klass->GetVirtualMethodDuringLinking(i);
+ ArtMethod* local_method = klass->GetVirtualMethodDuringLinking(i, image_pointer_size_);
size_t method_idx = local_method->GetMethodIndexDuringLinking();
if (method_idx < super_vtable_length &&
- local_method == vtable->GetWithoutChecks(method_idx)) {
+ local_method == vtable->GetElementPtrSize<ArtMethod*>(method_idx, image_pointer_size_)) {
continue;
}
- vtable->SetWithoutChecks<false>(actual_count, local_method);
+ vtable->SetElementPtrSize(actual_count, local_method, image_pointer_size_);
local_method->SetMethodIndex(actual_count);
++actual_count;
}
@@ -4589,9 +4622,9 @@ bool ClassLinker::LinkVirtualMethods(Thread* self, Handle<mirror::Class> klass)
// Shrink vtable if possible
CHECK_LE(actual_count, max_count);
if (actual_count < max_count) {
- vtable.Assign(vtable->CopyOf(self, actual_count));
+ vtable.Assign(down_cast<mirror::PointerArray*>(vtable->CopyOf(self, actual_count)));
if (UNLIKELY(vtable.Get() == nullptr)) {
- CHECK(self->IsExceptionPending()); // OOME.
+ self->AssertPendingOOMException();
return false;
}
}
@@ -4603,14 +4636,14 @@ bool ClassLinker::LinkVirtualMethods(Thread* self, Handle<mirror::Class> klass)
static_cast<int>(num_virtual_methods));
return false;
}
- mirror::ObjectArray<mirror::ArtMethod>* vtable = AllocArtMethodArray(self, num_virtual_methods);
+ auto* vtable = AllocPointerArray(self, num_virtual_methods);
if (UNLIKELY(vtable == nullptr)) {
- CHECK(self->IsExceptionPending()); // OOME.
+ self->AssertPendingOOMException();
return false;
}
for (size_t i = 0; i < num_virtual_methods; ++i) {
- mirror::ArtMethod* virtual_method = klass->GetVirtualMethodDuringLinking(i);
- vtable->SetWithoutChecks<false>(i, virtual_method);
+ ArtMethod* virtual_method = klass->GetVirtualMethodDuringLinking(i, image_pointer_size_);
+ vtable->SetElementPtrSize(i, virtual_method, image_pointer_size_);
virtual_method->SetMethodIndex(i & 0xFFFF);
}
klass->SetVTable(vtable);
@@ -4620,7 +4653,7 @@ bool ClassLinker::LinkVirtualMethods(Thread* self, Handle<mirror::Class> klass)
bool ClassLinker::LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass,
Handle<mirror::ObjectArray<mirror::Class>> interfaces,
- StackHandleScope<mirror::Class::kImtSize>* out_imt) {
+ ArtMethod** out_imt) {
StackHandleScope<3> hs(self);
Runtime* const runtime = Runtime::Current();
const bool has_superclass = klass->HasSuperClass();
@@ -4628,6 +4661,7 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass
const bool have_interfaces = interfaces.Get() != nullptr;
const size_t num_interfaces =
have_interfaces ? interfaces->GetLength() : klass->NumDirectInterfaces();
+ const size_t method_size = ArtMethod::ObjectSize(image_pointer_size_);
if (num_interfaces == 0) {
if (super_ifcount == 0) {
// Class implements no interfaces.
@@ -4666,7 +4700,7 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass
}
MutableHandle<mirror::IfTable> iftable(hs.NewHandle(AllocIfTable(self, ifcount)));
if (UNLIKELY(iftable.Get() == nullptr)) {
- CHECK(self->IsExceptionPending()); // OOME.
+ self->AssertPendingOOMException();
return false;
}
if (super_ifcount != 0) {
@@ -4715,9 +4749,10 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass
// Shrink iftable in case duplicates were found
if (idx < ifcount) {
DCHECK_NE(num_interfaces, 0U);
- iftable.Assign(down_cast<mirror::IfTable*>(iftable->CopyOf(self, idx * mirror::IfTable::kMax)));
+ iftable.Assign(down_cast<mirror::IfTable*>(
+ iftable->CopyOf(self, idx * mirror::IfTable::kMax)));
if (UNLIKELY(iftable.Get() == nullptr)) {
- CHECK(self->IsExceptionPending()); // OOME.
+ self->AssertPendingOOMException();
return false;
}
ifcount = idx;
@@ -4729,15 +4764,18 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass
if (klass->IsInterface()) {
return true;
}
- size_t miranda_list_size = 0;
- size_t max_miranda_methods = 0; // The max size of miranda_list.
- for (size_t i = 0; i < ifcount; ++i) {
- max_miranda_methods += iftable->GetInterface(i)->NumVirtualMethods();
- }
- MutableHandle<mirror::ObjectArray<mirror::ArtMethod>>
- miranda_list(hs.NewHandle(AllocArtMethodArray(self, max_miranda_methods)));
- MutableHandle<mirror::ObjectArray<mirror::ArtMethod>> vtable(
- hs.NewHandle(klass->GetVTableDuringLinking()));
+ // These are allocated on the heap to begin with; we then transfer them to linear alloc when we re-create
+ // the virtual methods array.
+ // Need to use low 4GB arenas for compiler or else the pointers won't fit in 32 bit method array
+ // during cross compilation.
+ // Use the linear alloc pool since this one is in the low 4gb for the compiler.
+ ArenaStack stack(runtime->GetLinearAlloc()->GetArenaPool());
+ ScopedArenaAllocator allocator(&stack);
+ ScopedArenaVector<ArtMethod*> miranda_methods(allocator.Adapter());
+
+ MutableHandle<mirror::PointerArray> vtable(hs.NewHandle(klass->GetVTableDuringLinking()));
+ ArtMethod* const unimplemented_method = runtime->GetImtUnimplementedMethod();
+ ArtMethod* const conflict_method = runtime->GetImtConflictMethod();
// Copy the IMT from the super class if possible.
bool extend_super_iftable = false;
if (has_superclass) {
@@ -4745,12 +4783,11 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass
extend_super_iftable = true;
if (super_class->ShouldHaveEmbeddedImtAndVTable()) {
for (size_t i = 0; i < mirror::Class::kImtSize; ++i) {
- out_imt->SetReference(i, super_class->GetEmbeddedImTableEntry(i));
+ out_imt[i] = super_class->GetEmbeddedImTableEntry(i, image_pointer_size_);
}
} else {
// No imt in the super class, need to reconstruct from the iftable.
mirror::IfTable* if_table = super_class->GetIfTable();
- mirror::ArtMethod* conflict_method = runtime->GetImtConflictMethod();
const size_t length = super_class->GetIfTableCount();
for (size_t i = 0; i < length; ++i) {
mirror::Class* interface = iftable->GetInterface(i);
@@ -4760,63 +4797,84 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass
if (method_array_count == 0) {
continue;
}
- mirror::ObjectArray<mirror::ArtMethod>* method_array = if_table->GetMethodArray(i);
+ auto* method_array = if_table->GetMethodArray(i);
for (size_t j = 0; j < num_virtuals; ++j) {
- mirror::ArtMethod* method = method_array->GetWithoutChecks(j);
+ auto method = method_array->GetElementPtrSize<ArtMethod*>(j, image_pointer_size_);
+ DCHECK(method != nullptr) << PrettyClass(super_class);
if (method->IsMiranda()) {
continue;
}
- mirror::ArtMethod* interface_method = interface->GetVirtualMethod(j);
+ ArtMethod* interface_method = interface->GetVirtualMethod(j, image_pointer_size_);
uint32_t imt_index = interface_method->GetDexMethodIndex() % mirror::Class::kImtSize;
- mirror::ArtMethod* imt_ref = out_imt->GetReference(imt_index)->AsArtMethod();
- if (imt_ref == runtime->GetImtUnimplementedMethod()) {
- out_imt->SetReference(imt_index, method);
+ auto*& imt_ref = out_imt[imt_index];
+ if (imt_ref == unimplemented_method) {
+ imt_ref = method;
} else if (imt_ref != conflict_method) {
- out_imt->SetReference(imt_index, conflict_method);
+ imt_ref = conflict_method;
}
}
}
}
}
+ // Allocate method arrays before since we don't want to miss visiting miranda method roots due to
+ // thread suspension.
for (size_t i = 0; i < ifcount; ++i) {
- self->AllowThreadSuspension();
size_t num_methods = iftable->GetInterface(i)->NumVirtualMethods();
if (num_methods > 0) {
- StackHandleScope<2> hs2(self);
const bool is_super = i < super_ifcount;
const bool super_interface = is_super && extend_super_iftable;
- Handle<mirror::ObjectArray<mirror::ArtMethod>> method_array;
- Handle<mirror::ObjectArray<mirror::ArtMethod>> input_array;
+ mirror::PointerArray* method_array;
if (super_interface) {
mirror::IfTable* if_table = klass->GetSuperClass()->GetIfTable();
DCHECK(if_table != nullptr);
DCHECK(if_table->GetMethodArray(i) != nullptr);
// If we are working on a super interface, try extending the existing method array.
- method_array = hs2.NewHandle(if_table->GetMethodArray(i)->Clone(self)->
- AsObjectArray<mirror::ArtMethod>());
+ method_array = down_cast<mirror::PointerArray*>(if_table->GetMethodArray(i)->Clone(self));
+ } else {
+ method_array = AllocPointerArray(self, num_methods);
+ }
+ if (UNLIKELY(method_array == nullptr)) {
+ self->AssertPendingOOMException();
+ return false;
+ }
+ iftable->SetMethodArray(i, method_array);
+ }
+ }
+
+ auto* old_cause = self->StartAssertNoThreadSuspension(
+ "Copying ArtMethods for LinkInterfaceMethods");
+ for (size_t i = 0; i < ifcount; ++i) {
+ size_t num_methods = iftable->GetInterface(i)->NumVirtualMethods();
+ if (num_methods > 0) {
+ StackHandleScope<2> hs2(self);
+ const bool is_super = i < super_ifcount;
+ const bool super_interface = is_super && extend_super_iftable;
+ auto method_array(hs2.NewHandle(iftable->GetMethodArray(i)));
+
+ ArtMethod* input_virtual_methods = nullptr;
+ Handle<mirror::PointerArray> input_vtable_array = NullHandle<mirror::PointerArray>();
+ int32_t input_array_length = 0;
+ if (super_interface) {
// We are overwriting a super class interface, try to only virtual methods instead of the
// whole vtable.
- input_array = hs2.NewHandle(klass->GetVirtualMethods());
+ input_virtual_methods = klass->GetVirtualMethodsPtr();
+ input_array_length = klass->NumVirtualMethods();
} else {
- method_array = hs2.NewHandle(AllocArtMethodArray(self, num_methods));
- // A new interface, we need the whole vtable incase a new interface method is implemented
+ // A new interface, we need the whole vtable in case a new interface method is implemented
// in the whole superclass.
- input_array = vtable;
- }
- if (UNLIKELY(method_array.Get() == nullptr)) {
- CHECK(self->IsExceptionPending()); // OOME.
- return false;
+ input_vtable_array = vtable;
+ input_array_length = input_vtable_array->GetLength();
}
- iftable->SetMethodArray(i, method_array.Get());
- if (input_array.Get() == nullptr) {
+ if (input_array_length == 0) {
// If the added virtual methods is empty, do nothing.
DCHECK(super_interface);
continue;
}
for (size_t j = 0; j < num_methods; ++j) {
- mirror::ArtMethod* interface_method = iftable->GetInterface(i)->GetVirtualMethod(j);
+ auto* interface_method = iftable->GetInterface(i)->GetVirtualMethod(
+ j, image_pointer_size_);
MethodNameAndSignatureComparator interface_name_comparator(
- interface_method->GetInterfaceMethodIfProxy());
+ interface_method->GetInterfaceMethodIfProxy(image_pointer_size_));
int32_t k;
// For each method listed in the interface's method list, find the
// matching method in our class's method list. We want to favor the
@@ -4826,108 +4884,161 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass
// it -- otherwise it would use the same vtable slot. In .dex files
// those don't end up in the virtual method table, so it shouldn't
// matter which direction we go. We walk it backward anyway.)
- for (k = input_array->GetLength() - 1; k >= 0; --k) {
- mirror::ArtMethod* vtable_method = input_array->GetWithoutChecks(k);
- mirror::ArtMethod* vtable_method_for_name_comparison =
- vtable_method->GetInterfaceMethodIfProxy();
+ for (k = input_array_length - 1; k >= 0; --k) {
+ ArtMethod* vtable_method = input_virtual_methods != nullptr ?
+ reinterpret_cast<ArtMethod*>(
+ reinterpret_cast<uintptr_t>(input_virtual_methods) + method_size * k) :
+ input_vtable_array->GetElementPtrSize<ArtMethod*>(k, image_pointer_size_);
+ ArtMethod* vtable_method_for_name_comparison =
+ vtable_method->GetInterfaceMethodIfProxy(image_pointer_size_);
if (interface_name_comparator.HasSameNameAndSignature(
vtable_method_for_name_comparison)) {
if (!vtable_method->IsAbstract() && !vtable_method->IsPublic()) {
- ThrowIllegalAccessError(
- klass.Get(),
+ ThrowIllegalAccessError(klass.Get(),
"Method '%s' implementing interface method '%s' is not public",
- PrettyMethod(vtable_method).c_str(),
- PrettyMethod(interface_method).c_str());
+ PrettyMethod(vtable_method).c_str(), PrettyMethod(interface_method).c_str());
return false;
}
- method_array->SetWithoutChecks<false>(j, vtable_method);
+ method_array->SetElementPtrSize(j, vtable_method, image_pointer_size_);
// Place method in imt if entry is empty, place conflict otherwise.
uint32_t imt_index = interface_method->GetDexMethodIndex() % mirror::Class::kImtSize;
- mirror::ArtMethod* imt_ref = out_imt->GetReference(imt_index)->AsArtMethod();
- mirror::ArtMethod* conflict_method = runtime->GetImtConflictMethod();
- if (imt_ref == runtime->GetImtUnimplementedMethod()) {
- out_imt->SetReference(imt_index, vtable_method);
- } else if (imt_ref != conflict_method) {
+ auto** imt_ref = &out_imt[imt_index];
+ if (*imt_ref == unimplemented_method) {
+ *imt_ref = vtable_method;
+ } else if (*imt_ref != conflict_method) {
// If we are not a conflict and we have the same signature and name as the imt entry,
// it must be that we overwrote a superclass vtable entry.
- MethodNameAndSignatureComparator imt_ref_name_comparator(
- imt_ref->GetInterfaceMethodIfProxy());
- if (imt_ref_name_comparator.HasSameNameAndSignature(
- vtable_method_for_name_comparison)) {
- out_imt->SetReference(imt_index, vtable_method);
- } else {
- out_imt->SetReference(imt_index, conflict_method);
- }
+ MethodNameAndSignatureComparator imt_comparator(
+ (*imt_ref)->GetInterfaceMethodIfProxy(image_pointer_size_));
+ *imt_ref = imt_comparator.HasSameNameAndSignature(vtable_method_for_name_comparison) ?
+ vtable_method : conflict_method;
}
break;
}
}
if (k < 0 && !super_interface) {
- mirror::ArtMethod* miranda_method = nullptr;
- for (size_t l = 0; l < miranda_list_size; ++l) {
- mirror::ArtMethod* mir_method = miranda_list->Get(l);
+ ArtMethod* miranda_method = nullptr;
+ for (auto& mir_method : miranda_methods) {
if (interface_name_comparator.HasSameNameAndSignature(mir_method)) {
miranda_method = mir_method;
break;
}
}
if (miranda_method == nullptr) {
+ size_t size = ArtMethod::ObjectSize(image_pointer_size_);
+ miranda_method = reinterpret_cast<ArtMethod*>(allocator.Alloc(size));
+ CHECK(miranda_method != nullptr);
// Point the interface table at a phantom slot.
- miranda_method = interface_method->Clone(self)->AsArtMethod();
- if (UNLIKELY(miranda_method == nullptr)) {
- CHECK(self->IsExceptionPending()); // OOME.
- return false;
- }
- DCHECK_LT(miranda_list_size, max_miranda_methods);
- miranda_list->Set<false>(miranda_list_size++, miranda_method);
+ new(miranda_method) ArtMethod(*interface_method, image_pointer_size_);
+ miranda_methods.push_back(miranda_method);
}
- method_array->SetWithoutChecks<false>(j, miranda_method);
+ method_array->SetElementPtrSize(j, miranda_method, image_pointer_size_);
}
}
}
}
- if (miranda_list_size > 0) {
- int old_method_count = klass->NumVirtualMethods();
- int new_method_count = old_method_count + miranda_list_size;
- mirror::ObjectArray<mirror::ArtMethod>* virtuals;
- if (old_method_count == 0) {
- virtuals = AllocArtMethodArray(self, new_method_count);
- } else {
- virtuals = klass->GetVirtualMethods()->CopyOf(self, new_method_count);
- }
+ if (!miranda_methods.empty()) {
+ const size_t old_method_count = klass->NumVirtualMethods();
+ const size_t new_method_count = old_method_count + miranda_methods.size();
+ // Attempt to realloc to save RAM if possible.
+ ArtMethod* old_virtuals = klass->GetVirtualMethodsPtr();
+ // The Realloced virtual methods aren't visible from the class roots, so there is no issue
+ // where GCs could attempt to mark stale pointers due to memcpy. And since we overwrite the
+ // realloced memory with out->CopyFrom, we are guaranteed to have objects in the to space since
+ // CopyFrom has internal read barriers.
+ auto* virtuals = reinterpret_cast<ArtMethod*>(runtime->GetLinearAlloc()->Realloc(
+ self, old_virtuals, old_method_count * method_size, new_method_count * method_size));
if (UNLIKELY(virtuals == nullptr)) {
- CHECK(self->IsExceptionPending()); // OOME.
+ self->AssertPendingOOMException();
return false;
}
- klass->SetVirtualMethods(virtuals);
+ ScopedArenaUnorderedMap<ArtMethod*, ArtMethod*> move_table(allocator.Adapter());
+ if (virtuals != old_virtuals) {
+ // Maps from heap allocated miranda method to linear alloc miranda method.
+ StrideIterator<ArtMethod> out(reinterpret_cast<uintptr_t>(virtuals), method_size);
+ // Copy over the old methods + miranda methods.
+ for (auto& m : klass->GetVirtualMethods(image_pointer_size_)) {
+ move_table.emplace(&m, &*out);
+ // The CopyFrom is only necessary to not miss read barriers since Realloc won't do read
+ // barriers when it copies.
+ out->CopyFrom(&m, image_pointer_size_);
+ ++out;
+ }
+ }
+ UpdateClassVirtualMethods(klass.Get(), virtuals, new_method_count);
+ // Done copying methods, they are all reachable from the class now, so we can end the no thread
+ // suspension assert.
+ self->EndAssertNoThreadSuspension(old_cause);
- int old_vtable_count = vtable->GetLength();
- int new_vtable_count = old_vtable_count + miranda_list_size;
- vtable.Assign(vtable->CopyOf(self, new_vtable_count));
+ size_t old_vtable_count = vtable->GetLength();
+ const size_t new_vtable_count = old_vtable_count + miranda_methods.size();
+ vtable.Assign(down_cast<mirror::PointerArray*>(vtable->CopyOf(self, new_vtable_count)));
if (UNLIKELY(vtable.Get() == nullptr)) {
- CHECK(self->IsExceptionPending()); // OOME.
+ self->AssertPendingOOMException();
return false;
}
- for (size_t i = 0; i < miranda_list_size; ++i) {
- mirror::ArtMethod* method = miranda_list->Get(i);
+ StrideIterator<ArtMethod> out(
+ reinterpret_cast<uintptr_t>(virtuals) + old_method_count * method_size, method_size);
+ for (auto* mir_method : miranda_methods) {
+ ArtMethod* out_method = &*out;
+ out->CopyFrom(mir_method, image_pointer_size_);
// Leave the declaring class alone as type indices are relative to it
- method->SetAccessFlags(method->GetAccessFlags() | kAccMiranda);
- method->SetMethodIndex(0xFFFF & (old_vtable_count + i));
- klass->SetVirtualMethod(old_method_count + i, method);
- vtable->SetWithoutChecks<false>(old_vtable_count + i, method);
+ out_method->SetAccessFlags(out_method->GetAccessFlags() | kAccMiranda);
+ out_method->SetMethodIndex(0xFFFF & old_vtable_count);
+ vtable->SetElementPtrSize(old_vtable_count, out_method, image_pointer_size_);
+ move_table.emplace(mir_method, out_method);
+ ++out;
+ ++old_vtable_count;
+ }
+
+ // Update old vtable methods.
+ for (size_t i = 0; i < old_vtable_count - miranda_methods.size(); ++i) {
+ auto* m = vtable->GetElementPtrSize<ArtMethod*>(i, image_pointer_size_);
+ DCHECK(m != nullptr) << PrettyClass(klass.Get());
+ auto it = move_table.find(m);
+ if (it != move_table.end()) {
+ auto* new_m = it->second;
+ DCHECK(new_m != nullptr) << PrettyClass(klass.Get());
+ vtable->SetElementPtrSize(i, new_m, image_pointer_size_);
+ }
}
- // TODO: do not assign to the vtable field until it is fully constructed.
klass->SetVTable(vtable.Get());
+ CHECK_EQ(old_vtable_count, new_vtable_count);
+ // Go fix up all the stale miranda pointers.
+ for (size_t i = 0; i < ifcount; ++i) {
+ for (size_t j = 0, count = iftable->GetMethodArrayCount(i); j < count; ++j) {
+ auto* method_array = iftable->GetMethodArray(i);
+ auto* m = method_array->GetElementPtrSize<ArtMethod*>(j, image_pointer_size_);
+ DCHECK(m != nullptr) << PrettyClass(klass.Get());
+ auto it = move_table.find(m);
+ if (it != move_table.end()) {
+ auto* new_m = it->second;
+ DCHECK(new_m != nullptr) << PrettyClass(klass.Get());
+ method_array->SetElementPtrSize(j, new_m, image_pointer_size_);
+ }
+ }
+ }
+ // Check that there are no stale methods in the dex cache array.
+ if (kIsDebugBuild) {
+ auto* resolved_methods = klass->GetDexCache()->GetResolvedMethods();
+ for (size_t i = 0, count = resolved_methods->GetLength(); i < count; ++i) {
+ auto* m = resolved_methods->GetElementPtrSize<ArtMethod*>(i, image_pointer_size_);
+ CHECK(move_table.find(m) == move_table.end()) << PrettyMethod(m);
+ }
+ }
+ // Put some random garbage in old virtuals to help find stale pointers.
+ if (virtuals != old_virtuals) {
+ memset(old_virtuals, 0xFEu, ArtMethod::ObjectSize(image_pointer_size_) * old_method_count);
+ }
+ } else {
+ self->EndAssertNoThreadSuspension(old_cause);
}
-
if (kIsDebugBuild) {
- mirror::ObjectArray<mirror::ArtMethod>* check_vtable = klass->GetVTableDuringLinking();
+ auto* check_vtable = klass->GetVTableDuringLinking();
for (int i = 0; i < check_vtable->GetLength(); ++i) {
- CHECK(check_vtable->GetWithoutChecks(i) != nullptr);
+ CHECK(check_vtable->GetElementPtrSize<ArtMethod*>(i, image_pointer_size_) != nullptr);
}
}
-
- self->AllowThreadSuspension();
return true;
}
@@ -4984,7 +5095,7 @@ bool ClassLinker::LinkFields(Thread* self, Handle<mirror::Class> klass, bool is_
// Initialize field_offset
MemberOffset field_offset(0);
if (is_static) {
- field_offset = klass->GetFirstReferenceStaticFieldOffsetDuringLinking();
+ field_offset = klass->GetFirstReferenceStaticFieldOffsetDuringLinking(image_pointer_size_);
} else {
mirror::Class* super_class = klass->GetSuperClass();
if (super_class != nullptr) {
@@ -5059,19 +5170,14 @@ bool ClassLinker::LinkFields(Thread* self, Handle<mirror::Class> klass, bool is_
} else {
klass->SetNumReferenceInstanceFields(num_reference_fields);
if (!klass->IsVariableSize()) {
- if (klass->DescriptorEquals("Ljava/lang/reflect/ArtMethod;")) {
- size_t pointer_size = GetInstructionSetPointerSize(Runtime::Current()->GetInstructionSet());
- klass->SetObjectSize(mirror::ArtMethod::InstanceSize(pointer_size));
- } else {
- std::string temp;
- DCHECK_GE(size, sizeof(mirror::Object)) << klass->GetDescriptor(&temp);
- size_t previous_size = klass->GetObjectSize();
- if (previous_size != 0) {
- // Make sure that we didn't originally have an incorrect size.
- CHECK_EQ(previous_size, size) << klass->GetDescriptor(&temp);
- }
- klass->SetObjectSize(size);
+ std::string temp;
+ DCHECK_GE(size, sizeof(mirror::Object)) << klass->GetDescriptor(&temp);
+ size_t previous_size = klass->GetObjectSize();
+ if (previous_size != 0) {
+ // Make sure that we didn't originally have an incorrect size.
+ CHECK_EQ(previous_size, size) << klass->GetDescriptor(&temp);
}
+ klass->SetObjectSize(size);
}
}
@@ -5079,7 +5185,7 @@ bool ClassLinker::LinkFields(Thread* self, Handle<mirror::Class> klass, bool is_
// Make sure that the fields array is ordered by name but all reference
// offsets are at the beginning as far as alignment allows.
MemberOffset start_ref_offset = is_static
- ? klass->GetFirstReferenceStaticFieldOffsetDuringLinking()
+ ? klass->GetFirstReferenceStaticFieldOffsetDuringLinking(image_pointer_size_)
: klass->GetFirstReferenceInstanceFieldOffset();
MemberOffset end_ref_offset(start_ref_offset.Uint32Value() +
num_reference_fields *
@@ -5203,19 +5309,19 @@ mirror::Class* ClassLinker::ResolveType(const DexFile& dex_file, uint16_t type_i
}
}
DCHECK((resolved == nullptr) || resolved->IsResolved() || resolved->IsErroneous())
- << PrettyDescriptor(resolved) << " " << resolved->GetStatus();
+ << PrettyDescriptor(resolved) << " " << resolved->GetStatus();
return resolved;
}
-mirror::ArtMethod* ClassLinker::ResolveMethod(const DexFile& dex_file, uint32_t method_idx,
- Handle<mirror::DexCache> dex_cache,
- Handle<mirror::ClassLoader> class_loader,
- Handle<mirror::ArtMethod> referrer,
- InvokeType type) {
+ArtMethod* ClassLinker::ResolveMethod(const DexFile& dex_file, uint32_t method_idx,
+ Handle<mirror::DexCache> dex_cache,
+ Handle<mirror::ClassLoader> class_loader,
+ ArtMethod* referrer, InvokeType type) {
DCHECK(dex_cache.Get() != nullptr);
// Check for hit in the dex cache.
- mirror::ArtMethod* resolved = dex_cache->GetResolvedMethod(method_idx);
+ ArtMethod* resolved = dex_cache->GetResolvedMethod(method_idx, image_pointer_size_);
if (resolved != nullptr && !resolved->IsRuntimeMethod()) {
+ DCHECK(resolved->GetDeclaringClassUnchecked() != nullptr) << resolved->GetDexMethodIndex();
return resolved;
}
// Fail, get the declaring class.
@@ -5230,15 +5336,16 @@ mirror::ArtMethod* ClassLinker::ResolveMethod(const DexFile& dex_file, uint32_t
switch (type) {
case kDirect: // Fall-through.
case kStatic:
- resolved = klass->FindDirectMethod(dex_cache.Get(), method_idx);
+ resolved = klass->FindDirectMethod(dex_cache.Get(), method_idx, image_pointer_size_);
+ DCHECK(resolved == nullptr || resolved->GetDeclaringClassUnchecked() != nullptr);
break;
case kInterface:
- resolved = klass->FindInterfaceMethod(dex_cache.Get(), method_idx);
+ resolved = klass->FindInterfaceMethod(dex_cache.Get(), method_idx, image_pointer_size_);
DCHECK(resolved == nullptr || resolved->GetDeclaringClass()->IsInterface());
break;
case kSuper: // Fall-through.
case kVirtual:
- resolved = klass->FindVirtualMethod(dex_cache.Get(), method_idx);
+ resolved = klass->FindVirtualMethod(dex_cache.Get(), method_idx, image_pointer_size_);
break;
default:
LOG(FATAL) << "Unreachable - invocation type: " << type;
@@ -5251,27 +5358,28 @@ mirror::ArtMethod* ClassLinker::ResolveMethod(const DexFile& dex_file, uint32_t
switch (type) {
case kDirect: // Fall-through.
case kStatic:
- resolved = klass->FindDirectMethod(name, signature);
+ resolved = klass->FindDirectMethod(name, signature, image_pointer_size_);
+ DCHECK(resolved == nullptr || resolved->GetDeclaringClassUnchecked() != nullptr);
break;
case kInterface:
- resolved = klass->FindInterfaceMethod(name, signature);
+ resolved = klass->FindInterfaceMethod(name, signature, image_pointer_size_);
DCHECK(resolved == nullptr || resolved->GetDeclaringClass()->IsInterface());
break;
case kSuper: // Fall-through.
case kVirtual:
- resolved = klass->FindVirtualMethod(name, signature);
+ resolved = klass->FindVirtualMethod(name, signature, image_pointer_size_);
break;
}
}
// If we found a method, check for incompatible class changes.
if (LIKELY(resolved != nullptr && !resolved->CheckIncompatibleClassChange(type))) {
// Be a good citizen and update the dex cache to speed subsequent calls.
- dex_cache->SetResolvedMethod(method_idx, resolved);
+ dex_cache->SetResolvedMethod(method_idx, resolved, image_pointer_size_);
return resolved;
} else {
// If we had a method, it's an incompatible-class-change error.
if (resolved != nullptr) {
- ThrowIncompatibleClassChangeError(type, resolved->GetInvokeType(), resolved, referrer.Get());
+ ThrowIncompatibleClassChangeError(type, resolved->GetInvokeType(), resolved, referrer);
} else {
// We failed to find the method which means either an access error, an incompatible class
// change, or no such method. First try to find the method among direct and virtual methods.
@@ -5280,28 +5388,27 @@ mirror::ArtMethod* ClassLinker::ResolveMethod(const DexFile& dex_file, uint32_t
switch (type) {
case kDirect:
case kStatic:
- resolved = klass->FindVirtualMethod(name, signature);
+ resolved = klass->FindVirtualMethod(name, signature, image_pointer_size_);
// Note: kDirect and kStatic are also mutually exclusive, but in that case we would
// have had a resolved method before, which triggers the "true" branch above.
break;
case kInterface:
case kVirtual:
case kSuper:
- resolved = klass->FindDirectMethod(name, signature);
+ resolved = klass->FindDirectMethod(name, signature, image_pointer_size_);
break;
}
// If we found something, check that it can be accessed by the referrer.
bool exception_generated = false;
- if (resolved != nullptr && referrer.Get() != nullptr) {
+ if (resolved != nullptr && referrer != nullptr) {
mirror::Class* methods_class = resolved->GetDeclaringClass();
mirror::Class* referring_class = referrer->GetDeclaringClass();
if (!referring_class->CanAccess(methods_class)) {
- ThrowIllegalAccessErrorClassForMethodDispatch(referring_class, methods_class,
- resolved, type);
+ ThrowIllegalAccessErrorClassForMethodDispatch(referring_class, methods_class, resolved,
+ type);
exception_generated = true;
- } else if (!referring_class->CanAccessMember(methods_class,
- resolved->GetAccessFlags())) {
+ } else if (!referring_class->CanAccessMember(methods_class, resolved->GetAccessFlags())) {
ThrowIllegalAccessErrorMethod(referring_class, resolved);
exception_generated = true;
}
@@ -5314,11 +5421,11 @@ mirror::ArtMethod* ClassLinker::ResolveMethod(const DexFile& dex_file, uint32_t
case kDirect:
case kStatic:
if (resolved != nullptr) {
- ThrowIncompatibleClassChangeError(type, kVirtual, resolved, referrer.Get());
+ ThrowIncompatibleClassChangeError(type, kVirtual, resolved, referrer);
} else {
- resolved = klass->FindInterfaceMethod(name, signature);
+ resolved = klass->FindInterfaceMethod(name, signature, image_pointer_size_);
if (resolved != nullptr) {
- ThrowIncompatibleClassChangeError(type, kInterface, resolved, referrer.Get());
+ ThrowIncompatibleClassChangeError(type, kInterface, resolved, referrer);
} else {
ThrowNoSuchMethodError(type, klass, name, signature);
}
@@ -5326,11 +5433,11 @@ mirror::ArtMethod* ClassLinker::ResolveMethod(const DexFile& dex_file, uint32_t
break;
case kInterface:
if (resolved != nullptr) {
- ThrowIncompatibleClassChangeError(type, kDirect, resolved, referrer.Get());
+ ThrowIncompatibleClassChangeError(type, kDirect, resolved, referrer);
} else {
- resolved = klass->FindVirtualMethod(name, signature);
+ resolved = klass->FindVirtualMethod(name, signature, image_pointer_size_);
if (resolved != nullptr) {
- ThrowIncompatibleClassChangeError(type, kVirtual, resolved, referrer.Get());
+ ThrowIncompatibleClassChangeError(type, kVirtual, resolved, referrer);
} else {
ThrowNoSuchMethodError(type, klass, name, signature);
}
@@ -5338,18 +5445,18 @@ mirror::ArtMethod* ClassLinker::ResolveMethod(const DexFile& dex_file, uint32_t
break;
case kSuper:
if (resolved != nullptr) {
- ThrowIncompatibleClassChangeError(type, kDirect, resolved, referrer.Get());
+ ThrowIncompatibleClassChangeError(type, kDirect, resolved, referrer);
} else {
ThrowNoSuchMethodError(type, klass, name, signature);
}
break;
case kVirtual:
if (resolved != nullptr) {
- ThrowIncompatibleClassChangeError(type, kDirect, resolved, referrer.Get());
+ ThrowIncompatibleClassChangeError(type, kDirect, resolved, referrer);
} else {
- resolved = klass->FindInterfaceMethod(name, signature);
+ resolved = klass->FindInterfaceMethod(name, signature, image_pointer_size_);
if (resolved != nullptr) {
- ThrowIncompatibleClassChangeError(type, kInterface, resolved, referrer.Get());
+ ThrowIncompatibleClassChangeError(type, kInterface, resolved, referrer);
} else {
ThrowNoSuchMethodError(type, klass, name, signature);
}
@@ -5434,7 +5541,7 @@ ArtField* ClassLinker::ResolveFieldJLS(const DexFile& dex_file, uint32_t field_i
return resolved;
}
-const char* ClassLinker::MethodShorty(uint32_t method_idx, mirror::ArtMethod* referrer,
+const char* ClassLinker::MethodShorty(uint32_t method_idx, ArtMethod* referrer,
uint32_t* length) {
mirror::Class* declaring_class = referrer->GetDeclaringClass();
mirror::DexCache* dex_cache = declaring_class->GetDexCache();
@@ -5489,14 +5596,14 @@ const void* ClassLinker::GetRuntimeQuickGenericJniStub() const {
return GetQuickGenericJniStub();
}
-void ClassLinker::SetEntryPointsToCompiledCode(mirror::ArtMethod* method,
+void ClassLinker::SetEntryPointsToCompiledCode(ArtMethod* method,
const void* method_code) const {
OatFile::OatMethod oat_method = CreateOatMethod(method_code);
oat_method.LinkMethod(method);
method->SetEntryPointFromInterpreter(artInterpreterToCompiledCodeBridge);
}
-void ClassLinker::SetEntryPointsToInterpreter(mirror::ArtMethod* method) const {
+void ClassLinker::SetEntryPointsToInterpreter(ArtMethod* method) const {
if (!method->IsNative()) {
method->SetEntryPointFromInterpreter(artInterpreterToInterpreterBridge);
method->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
@@ -5557,13 +5664,11 @@ const char* ClassLinker::GetClassRootDescriptor(ClassRoot class_root) {
"Ljava/lang/String;",
"Ljava/lang/DexCache;",
"Ljava/lang/ref/Reference;",
- "Ljava/lang/reflect/ArtMethod;",
"Ljava/lang/reflect/Constructor;",
"Ljava/lang/reflect/Field;",
"Ljava/lang/reflect/Method;",
"Ljava/lang/reflect/Proxy;",
"[Ljava/lang/String;",
- "[Ljava/lang/reflect/ArtMethod;",
"[Ljava/lang/reflect/Constructor;",
"[Ljava/lang/reflect/Field;",
"[Ljava/lang/reflect/Method;",
@@ -5635,7 +5740,7 @@ std::size_t ClassLinker::ClassDescriptorHashEquals::operator()(const char* descr
return ComputeModifiedUtf8Hash(descriptor);
}
-bool ClassLinker::MayBeCalledWithDirectCodePointer(mirror::ArtMethod* m) {
+bool ClassLinker::MayBeCalledWithDirectCodePointer(ArtMethod* m) {
if (Runtime::Current()->UseJit()) {
// JIT can have direct code pointers from any method to any other method.
return true;
@@ -5757,4 +5862,12 @@ jobject ClassLinker::CreatePathClassLoader(Thread* self, std::vector<const DexFi
return soa.Env()->NewGlobalRef(local_ref.get());
}
+ArtMethod* ClassLinker::CreateRuntimeMethod() {
+ ArtMethod* method = AllocArtMethodArray(Thread::Current(), 1);
+ CHECK(method != nullptr);
+ method->SetDexMethodIndex(DexFile::kDexNoIndex);
+ CHECK(method->IsRuntimeMethod());
+ return method;
+}
+
} // namespace art
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 947e152..fa8b2e7 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -42,6 +42,7 @@ namespace space {
namespace mirror {
class ClassLoader;
class DexCache;
+ class DexCachePointerArray;
class DexCacheTest_Open_Test;
class IfTable;
template<class T> class ObjectArray;
@@ -71,13 +72,11 @@ class ClassLinker {
kJavaLangString,
kJavaLangDexCache,
kJavaLangRefReference,
- kJavaLangReflectArtMethod,
kJavaLangReflectConstructor,
kJavaLangReflectField,
kJavaLangReflectMethod,
kJavaLangReflectProxy,
kJavaLangStringArrayClass,
- kJavaLangReflectArtMethodArrayClass,
kJavaLangReflectConstructorArrayClass,
kJavaLangReflectFieldArrayClass,
kJavaLangReflectMethodArrayClass,
@@ -187,7 +186,7 @@ class ClassLinker {
// Resolve a String with the given index from the DexFile, storing the
// result in the DexCache. The referrer is used to identify the
// target DexCache and ClassLoader to use for resolution.
- mirror::String* ResolveString(uint32_t string_idx, mirror::ArtMethod* referrer)
+ mirror::String* ResolveString(uint32_t string_idx, ArtMethod* referrer)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Resolve a String with the given index from the DexFile, storing the
@@ -205,7 +204,7 @@ class ClassLinker {
// Resolve a Type with the given index from the DexFile, storing the
// result in the DexCache. The referrer is used to identify the
// target DexCache and ClassLoader to use for resolution.
- mirror::Class* ResolveType(uint16_t type_idx, mirror::ArtMethod* referrer)
+ mirror::Class* ResolveType(uint16_t type_idx, ArtMethod* referrer)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
mirror::Class* ResolveType(uint16_t type_idx, ArtField* referrer)
@@ -225,25 +224,22 @@ class ClassLinker {
// in ResolveType. What is unique is the method type argument which
// is used to determine if this method is a direct, static, or
// virtual method.
- mirror::ArtMethod* ResolveMethod(const DexFile& dex_file,
- uint32_t method_idx,
- Handle<mirror::DexCache> dex_cache,
- Handle<mirror::ClassLoader> class_loader,
- Handle<mirror::ArtMethod> referrer,
- InvokeType type)
+ ArtMethod* ResolveMethod(const DexFile& dex_file, uint32_t method_idx,
+ Handle<mirror::DexCache> dex_cache,
+ Handle<mirror::ClassLoader> class_loader, ArtMethod* referrer,
+ InvokeType type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::ArtMethod* GetResolvedMethod(uint32_t method_idx, mirror::ArtMethod* referrer)
+ ArtMethod* GetResolvedMethod(uint32_t method_idx, ArtMethod* referrer)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::ArtMethod* ResolveMethod(Thread* self, uint32_t method_idx, mirror::ArtMethod** referrer,
- InvokeType type)
+ ArtMethod* ResolveMethod(Thread* self, uint32_t method_idx, ArtMethod* referrer, InvokeType type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
ArtField* GetResolvedField(uint32_t field_idx, mirror::Class* field_declaring_class)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
ArtField* GetResolvedField(uint32_t field_idx, mirror::DexCache* dex_cache)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ArtField* ResolveField(uint32_t field_idx, mirror::ArtMethod* referrer, bool is_static)
+ ArtField* ResolveField(uint32_t field_idx, ArtMethod* referrer, bool is_static)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Resolve a field with a given ID from the DexFile, storing the
@@ -263,12 +259,12 @@ class ClassLinker {
// in ResolveType. No is_static argument is provided so that Java
// field resolution semantics are followed.
ArtField* ResolveFieldJLS(const DexFile& dex_file, uint32_t field_idx,
- Handle<mirror::DexCache> dex_cache,
- Handle<mirror::ClassLoader> class_loader)
+ Handle<mirror::DexCache> dex_cache,
+ Handle<mirror::ClassLoader> class_loader)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Get shorty from method index without resolution. Used to do handlerization.
- const char* MethodShorty(uint32_t method_idx, mirror::ArtMethod* referrer, uint32_t* length)
+ const char* MethodShorty(uint32_t method_idx, ArtMethod* referrer, uint32_t* length)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Returns true on success, false if there's an exception pending.
@@ -323,7 +319,7 @@ class ClassLinker {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsDexFileRegistered(const DexFile& dex_file)
LOCKS_EXCLUDED(dex_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void FixupDexCaches(mirror::ArtMethod* resolution_method)
+ void FixupDexCaches(ArtMethod* resolution_method)
LOCKS_EXCLUDED(dex_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -360,7 +356,9 @@ class ClassLinker {
mirror::ObjectArray<mirror::String>* AllocStringArray(Thread* self, size_t length)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::ObjectArray<mirror::ArtMethod>* AllocArtMethodArray(Thread* self, size_t length)
+ ArtMethod* AllocArtMethodArray(Thread* self, size_t length);
+
+ mirror::PointerArray* AllocPointerArray(Thread* self, size_t length)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
mirror::IfTable* AllocIfTable(Thread* self, size_t ifcount)
@@ -381,7 +379,7 @@ class ClassLinker {
void ResolveClassExceptionHandlerTypes(const DexFile& dex_file,
Handle<mirror::Class> klass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void ResolveMethodExceptionHandlerTypes(const DexFile& dex_file, mirror::ArtMethod* klass)
+ void ResolveMethodExceptionHandlerTypes(const DexFile& dex_file, ArtMethod* klass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
mirror::Class* CreateProxyClass(ScopedObjectAccessAlreadyRunnable& soa, jstring name,
@@ -390,23 +388,23 @@ class ClassLinker {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
std::string GetDescriptorForProxy(mirror::Class* proxy_class)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::ArtMethod* FindMethodForProxy(mirror::Class* proxy_class,
- mirror::ArtMethod* proxy_method)
+ ArtMethod* FindMethodForProxy(mirror::Class* proxy_class, ArtMethod* proxy_method)
LOCKS_EXCLUDED(dex_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Get the oat code for a method when its class isn't yet initialized
- const void* GetQuickOatCodeFor(mirror::ArtMethod* method)
+ const void* GetQuickOatCodeFor(ArtMethod* method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Get the oat code for a method from a method index.
- const void* GetQuickOatCodeFor(const DexFile& dex_file, uint16_t class_def_idx, uint32_t method_idx)
+ const void* GetQuickOatCodeFor(const DexFile& dex_file, uint16_t class_def_idx,
+ uint32_t method_idx)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Get compiled code for a method, return null if no code
// exists. This is unlike Get..OatCodeFor which will return a bridge
// or interpreter entrypoint.
- const void* GetOatMethodQuickCodeFor(mirror::ArtMethod* method)
+ const void* GetOatMethodQuickCodeFor(ArtMethod* method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
pid_t GetClassesLockOwner(); // For SignalCatcher.
@@ -430,11 +428,11 @@ class ClassLinker {
}
// Set the entrypoints up for method to the given code.
- void SetEntryPointsToCompiledCode(mirror::ArtMethod* method, const void* method_code) const
+ void SetEntryPointsToCompiledCode(ArtMethod* method, const void* method_code) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Set the entrypoints up for method to the enter the interpreter.
- void SetEntryPointsToInterpreter(mirror::ArtMethod* method) const
+ void SetEntryPointsToInterpreter(ArtMethod* method) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Attempts to insert a class into a class table. Returns null if
@@ -444,9 +442,6 @@ class ClassLinker {
LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Special code to allocate an art method, use this instead of class->AllocObject.
- mirror::ArtMethod* AllocArtMethod(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
mirror::ObjectArray<mirror::Class>* GetClassRoots() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ObjectArray<mirror::Class>* class_roots = class_roots_.Read();
DCHECK(class_roots != nullptr);
@@ -465,7 +460,7 @@ class ClassLinker {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Returns true if the method can be called with its direct code pointer, false otherwise.
- bool MayBeCalledWithDirectCodePointer(mirror::ArtMethod* m)
+ bool MayBeCalledWithDirectCodePointer(ArtMethod* m)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Creates a GlobalRef PathClassLoader that can be used to load classes from the given dex files.
@@ -473,11 +468,20 @@ class ClassLinker {
jobject CreatePathClassLoader(Thread* self, std::vector<const DexFile*>& dex_files)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- private:
- static void InitFromImageInterpretOnlyCallback(mirror::Object* obj, void* arg)
+ size_t GetImagePointerSize() const {
+ DCHECK(ValidPointerSize(image_pointer_size_)) << image_pointer_size_;
+ return image_pointer_size_;
+ }
+
+ // Used by image writer for checking.
+ bool ClassInClassTable(mirror::Class* klass)
+ LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const OatFile::OatMethod FindOatMethodFor(mirror::ArtMethod* method, bool* found)
+ ArtMethod* CreateRuntimeMethod();
+
+ private:
+ const OatFile::OatMethod FindOatMethodFor(ArtMethod* method, bool* found)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
OatFile& GetImageOatFile(gc::space::ImageSpace* space)
@@ -535,9 +539,8 @@ class ClassLinker {
ArtField* dst)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::ArtMethod* LoadMethod(Thread* self, const DexFile& dex_file,
- const ClassDataItemIterator& dex_method,
- Handle<mirror::Class> klass)
+ void LoadMethod(Thread* self, const DexFile& dex_file, const ClassDataItemIterator& it,
+ Handle<mirror::Class> klass, ArtMethod* dst)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void FixupStaticTrampolines(mirror::Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -566,9 +569,8 @@ class ClassLinker {
Handle<mirror::ClassLoader> class_loader2)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool IsSameMethodSignatureInDifferentClassContexts(Thread* self, mirror::ArtMethod* method,
- mirror::Class* klass1,
- mirror::Class* klass2)
+ bool IsSameMethodSignatureInDifferentClassContexts(Thread* self, ArtMethod* method,
+ mirror::Class* klass1, mirror::Class* klass2)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool LinkClass(Thread* self, const char* descriptor, Handle<mirror::Class> klass,
@@ -584,15 +586,15 @@ class ClassLinker {
bool LinkMethods(Thread* self, Handle<mirror::Class> klass,
Handle<mirror::ObjectArray<mirror::Class>> interfaces,
- StackHandleScope<mirror::Class::kImtSize>* out_imt)
+ ArtMethod** out_imt)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool LinkVirtualMethods(Thread* self, Handle<mirror::Class> klass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool LinkInterfaceMethods(Thread* const self, Handle<mirror::Class> klass,
+ bool LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass,
Handle<mirror::ObjectArray<mirror::Class>> interfaces,
- StackHandleScope<mirror::Class::kImtSize>* out_imt)
+ ArtMethod** out_imt)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool LinkStaticFields(Thread* self, Handle<mirror::Class> klass, size_t* class_size)
@@ -601,12 +603,17 @@ class ClassLinker {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool LinkFields(Thread* self, Handle<mirror::Class> klass, bool is_static, size_t* class_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void LinkCode(Handle<mirror::ArtMethod> method, const OatFile::OatClass* oat_class,
+ void LinkCode(ArtMethod* method, const OatFile::OatClass* oat_class,
uint32_t class_def_method_index)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void CreateReferenceInstanceOffsets(Handle<mirror::Class> klass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void CheckProxyConstructor(ArtMethod* constructor) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void CheckProxyMethod(ArtMethod* method, ArtMethod* prototype) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
// For use by ImageWriter to find DexCaches for its roots
ReaderWriterMutex* DexLock()
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCK_RETURNED(dex_lock_) {
@@ -623,10 +630,9 @@ class ClassLinker {
// Returns the boot image oat file.
const OatFile* GetBootOatFile() SHARED_LOCKS_REQUIRED(dex_lock_);
- mirror::ArtMethod* CreateProxyConstructor(Thread* self, Handle<mirror::Class> klass)
+ void CreateProxyConstructor(Handle<mirror::Class> klass, ArtMethod* out)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::ArtMethod* CreateProxyMethod(Thread* self, Handle<mirror::Class> klass,
- Handle<mirror::ArtMethod> prototype)
+ void CreateProxyMethod(Handle<mirror::Class> klass, ArtMethod* prototype, ArtMethod* out)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Ensures that methods have the kAccPreverified bit set. We use the kAccPreverfied bit on the
@@ -673,6 +679,16 @@ class ClassLinker {
// Check for duplicate class definitions of the given oat file against all open oat files.
bool HasCollisions(const OatFile* oat_file, std::string* error_msg) LOCKS_EXCLUDED(dex_lock_);
+ bool HasInitWithString(Thread* self, ClassLinker* class_linker, const char* descriptor)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ bool CanWeInitializeClass(mirror::Class* klass, bool can_init_statics, bool can_init_parents)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ void UpdateClassVirtualMethods(mirror::Class* klass, ArtMethod* new_methods,
+ size_t new_num_methods)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::classlinker_classes_lock_);
+
std::vector<const DexFile*> boot_class_path_;
std::vector<std::unique_ptr<const DexFile>> opened_dex_files_;
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index d155941..a4e0227 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -20,6 +20,7 @@
#include <string>
#include "art_field-inl.h"
+#include "art_method-inl.h"
#include "class_linker-inl.h"
#include "common_runtime_test.h"
#include "dex_file.h"
@@ -27,7 +28,6 @@
#include "gc/heap.h"
#include "mirror/abstract_method.h"
#include "mirror/accessible_object.h"
-#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache.h"
#include "mirror/field.h"
@@ -159,9 +159,9 @@ class ClassLinkerTest : public CommonRuntimeTest {
EXPECT_EQ(class_linker_->FindArrayClass(self, &array_ptr), array.Get());
}
- void AssertMethod(mirror::ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void AssertMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
EXPECT_TRUE(method != nullptr);
- EXPECT_TRUE(method->GetClass() != nullptr);
+ EXPECT_TRUE(method->GetDeclaringClass() != nullptr);
EXPECT_TRUE(method->GetName() != nullptr);
EXPECT_TRUE(method->GetSignature() != Signature::NoSignature());
@@ -208,8 +208,8 @@ class ClassLinkerTest : public CommonRuntimeTest {
if (klass->IsInterface()) {
EXPECT_TRUE(klass->IsAbstract());
if (klass->NumDirectMethods() == 1) {
- EXPECT_TRUE(klass->GetDirectMethod(0)->IsClassInitializer());
- EXPECT_TRUE(klass->GetDirectMethod(0)->IsDirect());
+ EXPECT_TRUE(klass->GetDirectMethod(0, sizeof(void*))->IsClassInitializer());
+ EXPECT_TRUE(klass->GetDirectMethod(0, sizeof(void*))->IsDirect());
} else {
EXPECT_EQ(0U, klass->NumDirectMethods());
}
@@ -246,18 +246,16 @@ class ClassLinkerTest : public CommonRuntimeTest {
EXPECT_FALSE(klass->IsPrimitive());
EXPECT_TRUE(klass->CanAccess(klass.Get()));
- for (size_t i = 0; i < klass->NumDirectMethods(); i++) {
- mirror::ArtMethod* method = klass->GetDirectMethod(i);
- AssertMethod(method);
- EXPECT_TRUE(method->IsDirect());
- EXPECT_EQ(klass.Get(), method->GetDeclaringClass());
+ for (ArtMethod& method : klass->GetDirectMethods(sizeof(void*))) {
+ AssertMethod(&method);
+ EXPECT_TRUE(method.IsDirect());
+ EXPECT_EQ(klass.Get(), method.GetDeclaringClass());
}
- for (size_t i = 0; i < klass->NumVirtualMethods(); i++) {
- mirror::ArtMethod* method = klass->GetVirtualMethod(i);
- AssertMethod(method);
- EXPECT_FALSE(method->IsDirect());
- EXPECT_TRUE(method->GetDeclaringClass()->IsAssignableFrom(klass.Get()));
+ for (ArtMethod& method : klass->GetVirtualMethods(sizeof(void*))) {
+ AssertMethod(&method);
+ EXPECT_FALSE(method.IsDirect());
+ EXPECT_TRUE(method.GetDeclaringClass()->IsAssignableFrom(klass.Get()));
}
for (size_t i = 0; i < klass->NumInstanceFields(); i++) {
@@ -358,9 +356,10 @@ class ClassLinkerTest : public CommonRuntimeTest {
class_linker_->VisitRoots(&visitor, kVisitRootFlagAllRoots);
// Verify the dex cache has resolution methods in all resolved method slots
mirror::DexCache* dex_cache = class_linker_->FindDexCache(dex);
- mirror::ObjectArray<mirror::ArtMethod>* resolved_methods = dex_cache->GetResolvedMethods();
+ auto* resolved_methods = dex_cache->GetResolvedMethods();
for (size_t i = 0; i < static_cast<size_t>(resolved_methods->GetLength()); i++) {
- EXPECT_TRUE(resolved_methods->Get(i) != nullptr) << dex.GetLocation() << " i=" << i;
+ EXPECT_TRUE(resolved_methods->GetElementPtrSize<ArtMethod*>(i, sizeof(void*)) != nullptr)
+ << dex.GetLocation() << " i=" << i;
}
}
@@ -394,9 +393,8 @@ struct CheckOffsets {
bool error = false;
- // Methods and classes have a different size due to padding field. Strings are variable length.
- if (!klass->IsArtMethodClass() && !klass->IsClassClass() && !klass->IsStringClass() &&
- !is_static) {
+ // Classes have a different size due to padding field. Strings are variable length.
+ if (!klass->IsClassClass() && !klass->IsStringClass() && !is_static) {
// Currently only required for AccessibleObject since of the padding fields. The class linker
// says AccessibleObject is 9 bytes but sizeof(AccessibleObject) is 12 bytes due to padding.
// The RoundUp is to get around this case.
@@ -487,20 +485,6 @@ struct ObjectOffsets : public CheckOffsets<mirror::Object> {
};
};
-struct ArtMethodOffsets : public CheckOffsets<mirror::ArtMethod> {
- ArtMethodOffsets() : CheckOffsets<mirror::ArtMethod>(false, "Ljava/lang/reflect/ArtMethod;") {
- addOffset(OFFSETOF_MEMBER(mirror::ArtMethod, access_flags_), "accessFlags");
- addOffset(OFFSETOF_MEMBER(mirror::ArtMethod, declaring_class_), "declaringClass");
- addOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_cache_resolved_methods_),
- "dexCacheResolvedMethods");
- addOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_cache_resolved_types_),
- "dexCacheResolvedTypes");
- addOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_code_item_offset_), "dexCodeItemOffset");
- addOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_method_index_), "dexMethodIndex");
- addOffset(OFFSETOF_MEMBER(mirror::ArtMethod, method_index_), "methodIndex");
- };
-};
-
struct ClassOffsets : public CheckOffsets<mirror::Class> {
ClassOffsets() : CheckOffsets<mirror::Class>(false, "Ljava/lang/Class;") {
addOffset(OFFSETOF_MEMBER(mirror::Class, access_flags_), "accessFlags");
@@ -516,12 +500,14 @@ struct ClassOffsets : public CheckOffsets<mirror::Class> {
addOffset(OFFSETOF_MEMBER(mirror::Class, ifields_), "iFields");
addOffset(OFFSETOF_MEMBER(mirror::Class, iftable_), "ifTable");
addOffset(OFFSETOF_MEMBER(mirror::Class, name_), "name");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, num_direct_methods_), "numDirectMethods");
addOffset(OFFSETOF_MEMBER(mirror::Class, num_instance_fields_), "numInstanceFields");
addOffset(OFFSETOF_MEMBER(mirror::Class, num_reference_instance_fields_),
"numReferenceInstanceFields");
addOffset(OFFSETOF_MEMBER(mirror::Class, num_reference_static_fields_),
"numReferenceStaticFields");
addOffset(OFFSETOF_MEMBER(mirror::Class, num_static_fields_), "numStaticFields");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, num_virtual_methods_), "numVirtualMethods");
addOffset(OFFSETOF_MEMBER(mirror::Class, object_size_), "objectSize");
addOffset(OFFSETOF_MEMBER(mirror::Class, primitive_type_), "primitiveType");
addOffset(OFFSETOF_MEMBER(mirror::Class, reference_instance_offsets_),
@@ -641,7 +627,6 @@ struct AbstractMethodOffsets : public CheckOffsets<mirror::AbstractMethod> {
TEST_F(ClassLinkerTest, ValidateFieldOrderOfJavaCppUnionClasses) {
ScopedObjectAccess soa(Thread::Current());
EXPECT_TRUE(ObjectOffsets().Check());
- EXPECT_TRUE(ArtMethodOffsets().Check());
EXPECT_TRUE(ClassOffsets().Check());
EXPECT_TRUE(StringOffsets().Check());
EXPECT_TRUE(ThrowableOffsets().Check());
@@ -899,7 +884,7 @@ TEST_F(ClassLinkerTest, StaticFields) {
// Static final primitives that are initialized by a compile-time constant
// expression resolve to a copy of a constant value from the constant pool.
// So <clinit> should be null.
- mirror::ArtMethod* clinit = statics->FindDirectMethod("<clinit>", "()V");
+ ArtMethod* clinit = statics->FindDirectMethod("<clinit>", "()V", sizeof(void*));
EXPECT_TRUE(clinit == nullptr);
EXPECT_EQ(9U, statics->NumStaticFields());
@@ -986,15 +971,15 @@ TEST_F(ClassLinkerTest, Interfaces) {
EXPECT_TRUE(J->IsAssignableFrom(B.Get()));
const Signature void_sig = I->GetDexCache()->GetDexFile()->CreateSignature("()V");
- mirror::ArtMethod* Ii = I->FindVirtualMethod("i", void_sig);
- mirror::ArtMethod* Jj1 = J->FindVirtualMethod("j1", void_sig);
- mirror::ArtMethod* Jj2 = J->FindVirtualMethod("j2", void_sig);
- mirror::ArtMethod* Kj1 = K->FindInterfaceMethod("j1", void_sig);
- mirror::ArtMethod* Kj2 = K->FindInterfaceMethod("j2", void_sig);
- mirror::ArtMethod* Kk = K->FindInterfaceMethod("k", void_sig);
- mirror::ArtMethod* Ai = A->FindVirtualMethod("i", void_sig);
- mirror::ArtMethod* Aj1 = A->FindVirtualMethod("j1", void_sig);
- mirror::ArtMethod* Aj2 = A->FindVirtualMethod("j2", void_sig);
+ ArtMethod* Ii = I->FindVirtualMethod("i", void_sig, sizeof(void*));
+ ArtMethod* Jj1 = J->FindVirtualMethod("j1", void_sig, sizeof(void*));
+ ArtMethod* Jj2 = J->FindVirtualMethod("j2", void_sig, sizeof(void*));
+ ArtMethod* Kj1 = K->FindInterfaceMethod("j1", void_sig, sizeof(void*));
+ ArtMethod* Kj2 = K->FindInterfaceMethod("j2", void_sig, sizeof(void*));
+ ArtMethod* Kk = K->FindInterfaceMethod("k", void_sig, sizeof(void*));
+ ArtMethod* Ai = A->FindVirtualMethod("i", void_sig, sizeof(void*));
+ ArtMethod* Aj1 = A->FindVirtualMethod("j1", void_sig, sizeof(void*));
+ ArtMethod* Aj2 = A->FindVirtualMethod("j2", void_sig, sizeof(void*));
ASSERT_TRUE(Ii != nullptr);
ASSERT_TRUE(Jj1 != nullptr);
ASSERT_TRUE(Jj2 != nullptr);
@@ -1009,21 +994,17 @@ TEST_F(ClassLinkerTest, Interfaces) {
EXPECT_NE(Jj2, Aj2);
EXPECT_EQ(Kj1, Jj1);
EXPECT_EQ(Kj2, Jj2);
- EXPECT_EQ(Ai, A->FindVirtualMethodForInterface(Ii));
- EXPECT_EQ(Aj1, A->FindVirtualMethodForInterface(Jj1));
- EXPECT_EQ(Aj2, A->FindVirtualMethodForInterface(Jj2));
- EXPECT_EQ(Ai, A->FindVirtualMethodForVirtualOrInterface(Ii));
- EXPECT_EQ(Aj1, A->FindVirtualMethodForVirtualOrInterface(Jj1));
- EXPECT_EQ(Aj2, A->FindVirtualMethodForVirtualOrInterface(Jj2));
-
- ArtField* Afoo = mirror::Class::FindStaticField(soa.Self(), A, "foo",
- "Ljava/lang/String;");
- ArtField* Bfoo = mirror::Class::FindStaticField(soa.Self(), B, "foo",
- "Ljava/lang/String;");
- ArtField* Jfoo = mirror::Class::FindStaticField(soa.Self(), J, "foo",
- "Ljava/lang/String;");
- ArtField* Kfoo = mirror::Class::FindStaticField(soa.Self(), K, "foo",
- "Ljava/lang/String;");
+ EXPECT_EQ(Ai, A->FindVirtualMethodForInterface(Ii, sizeof(void*)));
+ EXPECT_EQ(Aj1, A->FindVirtualMethodForInterface(Jj1, sizeof(void*)));
+ EXPECT_EQ(Aj2, A->FindVirtualMethodForInterface(Jj2, sizeof(void*)));
+ EXPECT_EQ(Ai, A->FindVirtualMethodForVirtualOrInterface(Ii, sizeof(void*)));
+ EXPECT_EQ(Aj1, A->FindVirtualMethodForVirtualOrInterface(Jj1, sizeof(void*)));
+ EXPECT_EQ(Aj2, A->FindVirtualMethodForVirtualOrInterface(Jj2, sizeof(void*)));
+
+ ArtField* Afoo = mirror::Class::FindStaticField(soa.Self(), A, "foo", "Ljava/lang/String;");
+ ArtField* Bfoo = mirror::Class::FindStaticField(soa.Self(), B, "foo", "Ljava/lang/String;");
+ ArtField* Jfoo = mirror::Class::FindStaticField(soa.Self(), J, "foo", "Ljava/lang/String;");
+ ArtField* Kfoo = mirror::Class::FindStaticField(soa.Self(), K, "foo", "Ljava/lang/String;");
ASSERT_TRUE(Afoo != nullptr);
EXPECT_EQ(Afoo, Bfoo);
EXPECT_EQ(Afoo, Jfoo);
@@ -1043,17 +1024,17 @@ TEST_F(ClassLinkerTest, ResolveVerifyAndClinit) {
Handle<mirror::ClassLoader> class_loader(
hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
mirror::Class* klass = class_linker_->FindClass(soa.Self(), "LStaticsFromCode;", class_loader);
- mirror::ArtMethod* clinit = klass->FindClassInitializer();
- mirror::ArtMethod* getS0 = klass->FindDirectMethod("getS0", "()Ljava/lang/Object;");
+ ArtMethod* clinit = klass->FindClassInitializer(sizeof(void*));
+ ArtMethod* getS0 = klass->FindDirectMethod("getS0", "()Ljava/lang/Object;", sizeof(void*));
const DexFile::StringId* string_id = dex_file->FindStringId("LStaticsFromCode;");
ASSERT_TRUE(string_id != nullptr);
const DexFile::TypeId* type_id = dex_file->FindTypeId(dex_file->GetIndexForStringId(*string_id));
ASSERT_TRUE(type_id != nullptr);
uint32_t type_idx = dex_file->GetIndexForTypeId(*type_id);
- mirror::Class* uninit = ResolveVerifyAndClinit(type_idx, clinit, Thread::Current(), true, false);
+ mirror::Class* uninit = ResolveVerifyAndClinit(type_idx, clinit, soa.Self(), true, false);
EXPECT_TRUE(uninit != nullptr);
EXPECT_FALSE(uninit->IsInitialized());
- mirror::Class* init = ResolveVerifyAndClinit(type_idx, getS0, Thread::Current(), true, false);
+ mirror::Class* init = ResolveVerifyAndClinit(type_idx, getS0, soa.Self(), true, false);
EXPECT_TRUE(init != nullptr);
EXPECT_TRUE(init->IsInitialized());
}
@@ -1109,22 +1090,23 @@ TEST_F(ClassLinkerTest, ValidatePredefinedClassSizes) {
mirror::Class* c;
c = class_linker_->FindClass(soa.Self(), "Ljava/lang/Class;", class_loader);
- EXPECT_EQ(c->GetClassSize(), mirror::Class::ClassClassSize());
+ ASSERT_TRUE(c != nullptr);
+ EXPECT_EQ(c->GetClassSize(), mirror::Class::ClassClassSize(sizeof(void*)));
c = class_linker_->FindClass(soa.Self(), "Ljava/lang/Object;", class_loader);
- EXPECT_EQ(c->GetClassSize(), mirror::Object::ClassSize());
+ ASSERT_TRUE(c != nullptr);
+ EXPECT_EQ(c->GetClassSize(), mirror::Object::ClassSize(sizeof(void*)));
c = class_linker_->FindClass(soa.Self(), "Ljava/lang/String;", class_loader);
- EXPECT_EQ(c->GetClassSize(), mirror::String::ClassSize());
+ ASSERT_TRUE(c != nullptr);
+ EXPECT_EQ(c->GetClassSize(), mirror::String::ClassSize(sizeof(void*)));
c = class_linker_->FindClass(soa.Self(), "Ljava/lang/DexCache;", class_loader);
- EXPECT_EQ(c->GetClassSize(), mirror::DexCache::ClassSize());
-
- c = class_linker_->FindClass(soa.Self(), "Ljava/lang/reflect/ArtMethod;", class_loader);
- EXPECT_EQ(c->GetClassSize(), mirror::ArtMethod::ClassSize());
+ ASSERT_TRUE(c != nullptr);
+ EXPECT_EQ(c->GetClassSize(), mirror::DexCache::ClassSize(sizeof(void*)));
}
-static void CheckMethod(mirror::ArtMethod* method, bool verified)
+static void CheckMethod(ArtMethod* method, bool verified)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (!method->IsNative() && !method->IsAbstract()) {
EXPECT_EQ((method->GetAccessFlags() & kAccPreverified) != 0U, verified)
@@ -1136,11 +1118,11 @@ static void CheckPreverified(mirror::Class* c, bool preverified)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
EXPECT_EQ((c->GetAccessFlags() & kAccPreverified) != 0U, preverified)
<< "Class " << PrettyClass(c) << " not as expected";
- for (uint32_t i = 0; i < c->NumDirectMethods(); ++i) {
- CheckMethod(c->GetDirectMethod(i), preverified);
+ for (auto& m : c->GetDirectMethods(sizeof(void*))) {
+ CheckMethod(&m, preverified);
}
- for (uint32_t i = 0; i < c->NumVirtualMethods(); ++i) {
- CheckMethod(c->GetVirtualMethod(i), preverified);
+ for (auto& m : c->GetVirtualMethods(sizeof(void*))) {
+ CheckMethod(&m, preverified);
}
}
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index de3a29b..5f9e413 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -24,6 +24,7 @@
#include <stdlib.h>
#include "../../external/icu/icu4c/source/common/unicode/uvernum.h"
+#include "art_field-inl.h"
#include "base/macros.h"
#include "base/logging.h"
#include "base/stl_util.h"
@@ -31,17 +32,19 @@
#include "base/unix_file/fd_file.h"
#include "class_linker.h"
#include "compiler_callbacks.h"
-#include "dex_file.h"
+#include "dex_file-inl.h"
#include "gc_root-inl.h"
#include "gc/heap.h"
#include "gtest/gtest.h"
#include "handle_scope-inl.h"
#include "interpreter/unstarted_runtime.h"
#include "jni_internal.h"
+#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mem_map.h"
#include "noop_compiler_callbacks.h"
#include "os.h"
+#include "primitive.h"
#include "runtime-inl.h"
#include "scoped_thread_state_change.h"
#include "thread.h"
diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc
index fb81ad2..3acd366 100644
--- a/runtime/common_throws.cc
+++ b/runtime/common_throws.cc
@@ -19,12 +19,12 @@
#include <sstream>
#include "art_field-inl.h"
+#include "art_method-inl.h"
#include "base/logging.h"
#include "class_linker-inl.h"
#include "dex_file-inl.h"
#include "dex_instruction-inl.h"
#include "invoke_type.h"
-#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
@@ -78,7 +78,7 @@ static void ThrowWrappedException(const char* exception_descriptor,
// AbstractMethodError
-void ThrowAbstractMethodError(mirror::ArtMethod* method) {
+void ThrowAbstractMethodError(ArtMethod* method) {
ThrowException("Ljava/lang/AbstractMethodError;", nullptr,
StringPrintf("abstract method \"%s\"",
PrettyMethod(method).c_str()).c_str());
@@ -145,7 +145,7 @@ void ThrowIllegalAccessErrorClass(mirror::Class* referrer, mirror::Class* access
}
void ThrowIllegalAccessErrorClassForMethodDispatch(mirror::Class* referrer, mirror::Class* accessed,
- mirror::ArtMethod* called,
+ ArtMethod* called,
InvokeType type) {
std::ostringstream msg;
msg << "Illegal class access ('" << PrettyDescriptor(referrer) << "' attempting to access '"
@@ -154,7 +154,7 @@ void ThrowIllegalAccessErrorClassForMethodDispatch(mirror::Class* referrer, mirr
ThrowException("Ljava/lang/IllegalAccessError;", referrer, msg.str().c_str());
}
-void ThrowIllegalAccessErrorMethod(mirror::Class* referrer, mirror::ArtMethod* accessed) {
+void ThrowIllegalAccessErrorMethod(mirror::Class* referrer, ArtMethod* accessed) {
std::ostringstream msg;
msg << "Method '" << PrettyMethod(accessed) << "' is inaccessible to class '"
<< PrettyDescriptor(referrer) << "'";
@@ -168,13 +168,12 @@ void ThrowIllegalAccessErrorField(mirror::Class* referrer, ArtField* accessed) {
ThrowException("Ljava/lang/IllegalAccessError;", referrer, msg.str().c_str());
}
-void ThrowIllegalAccessErrorFinalField(mirror::ArtMethod* referrer,
- ArtField* accessed) {
+void ThrowIllegalAccessErrorFinalField(ArtMethod* referrer, ArtField* accessed) {
std::ostringstream msg;
msg << "Final field '" << PrettyField(accessed, false) << "' cannot be written to by method '"
<< PrettyMethod(referrer) << "'";
ThrowException("Ljava/lang/IllegalAccessError;",
- referrer != nullptr ? referrer->GetClass() : nullptr,
+ referrer != nullptr ? referrer->GetDeclaringClass() : nullptr,
msg.str().c_str());
}
@@ -201,19 +200,18 @@ void ThrowIllegalArgumentException(const char* msg) {
// IncompatibleClassChangeError
void ThrowIncompatibleClassChangeError(InvokeType expected_type, InvokeType found_type,
- mirror::ArtMethod* method,
- mirror::ArtMethod* referrer) {
+ ArtMethod* method, ArtMethod* referrer) {
std::ostringstream msg;
msg << "The method '" << PrettyMethod(method) << "' was expected to be of type "
<< expected_type << " but instead was found to be of type " << found_type;
ThrowException("Ljava/lang/IncompatibleClassChangeError;",
- referrer != nullptr ? referrer->GetClass() : nullptr,
+ referrer != nullptr ? referrer->GetDeclaringClass() : nullptr,
msg.str().c_str());
}
-void ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(mirror::ArtMethod* interface_method,
+void ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(ArtMethod* interface_method,
mirror::Object* this_object,
- mirror::ArtMethod* referrer) {
+ ArtMethod* referrer) {
// Referrer is calling interface_method on this_object, however, the interface_method isn't
// implemented by this_object.
CHECK(this_object != nullptr);
@@ -223,17 +221,17 @@ void ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(mirror::ArtMetho
<< PrettyDescriptor(interface_method->GetDeclaringClass())
<< "' in call to '" << PrettyMethod(interface_method) << "'";
ThrowException("Ljava/lang/IncompatibleClassChangeError;",
- referrer != nullptr ? referrer->GetClass() : nullptr,
+ referrer != nullptr ? referrer->GetDeclaringClass() : nullptr,
msg.str().c_str());
}
void ThrowIncompatibleClassChangeErrorField(ArtField* resolved_field, bool is_static,
- mirror::ArtMethod* referrer) {
+ ArtMethod* referrer) {
std::ostringstream msg;
msg << "Expected '" << PrettyField(resolved_field) << "' to be a "
<< (is_static ? "static" : "instance") << " field" << " rather than a "
<< (is_static ? "instance" : "static") << " field";
- ThrowException("Ljava/lang/IncompatibleClassChangeError;", referrer->GetClass(),
+ ThrowException("Ljava/lang/IncompatibleClassChangeError;", referrer->GetDeclaringClass(),
msg.str().c_str());
}
@@ -317,7 +315,7 @@ void ThrowNoSuchMethodError(InvokeType type, mirror::Class* c, const StringPiece
}
void ThrowNoSuchMethodError(uint32_t method_idx) {
- mirror::ArtMethod* method = Thread::Current()->GetCurrentMethod(nullptr);
+ ArtMethod* method = Thread::Current()->GetCurrentMethod(nullptr);
mirror::DexCache* dex_cache = method->GetDeclaringClass()->GetDexCache();
const DexFile& dex_file = *dex_cache->GetDexFile();
std::ostringstream msg;
@@ -353,7 +351,7 @@ void ThrowNullPointerExceptionForMethodAccess(uint32_t method_idx,
ThrowNullPointerExceptionForMethodAccessImpl(method_idx, dex_file, type);
}
-void ThrowNullPointerExceptionForMethodAccess(mirror::ArtMethod* method,
+void ThrowNullPointerExceptionForMethodAccess(ArtMethod* method,
InvokeType type) {
mirror::DexCache* dex_cache = method->GetDeclaringClass()->GetDexCache();
const DexFile& dex_file = *dex_cache->GetDexFile();
@@ -363,7 +361,7 @@ void ThrowNullPointerExceptionForMethodAccess(mirror::ArtMethod* method,
void ThrowNullPointerExceptionFromDexPC() {
uint32_t throw_dex_pc;
- mirror::ArtMethod* method = Thread::Current()->GetCurrentMethod(&throw_dex_pc);
+ ArtMethod* method = Thread::Current()->GetCurrentMethod(&throw_dex_pc);
const DexFile::CodeItem* code = method->GetCodeItem();
CHECK_LT(throw_dex_pc, code->insns_size_in_code_units_);
const Instruction* instr = Instruction::At(&code->insns_[throw_dex_pc]);
@@ -390,7 +388,7 @@ void ThrowNullPointerExceptionFromDexPC() {
case Instruction::INVOKE_VIRTUAL_RANGE_QUICK: {
// Since we replaced the method index, we ask the verifier to tell us which
// method is invoked at this location.
- mirror::ArtMethod* invoked_method =
+ ArtMethod* invoked_method =
verifier::MethodVerifier::FindInvokedMethodAtDexPc(method, throw_dex_pc);
if (invoked_method != nullptr) {
// NPE with precise message.
diff --git a/runtime/common_throws.h b/runtime/common_throws.h
index bd667fa..b391c5b 100644
--- a/runtime/common_throws.h
+++ b/runtime/common_throws.h
@@ -22,17 +22,17 @@
namespace art {
namespace mirror {
- class ArtMethod;
class Class;
class Object;
} // namespace mirror
class ArtField;
+class ArtMethod;
class Signature;
class StringPiece;
// AbstractMethodError
-void ThrowAbstractMethodError(mirror::ArtMethod* method)
+void ThrowAbstractMethodError(ArtMethod* method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
// ArithmeticException
@@ -74,17 +74,17 @@ void ThrowIllegalAccessErrorClass(mirror::Class* referrer, mirror::Class* access
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
void ThrowIllegalAccessErrorClassForMethodDispatch(mirror::Class* referrer, mirror::Class* accessed,
- mirror::ArtMethod* called,
+ ArtMethod* called,
InvokeType type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
-void ThrowIllegalAccessErrorMethod(mirror::Class* referrer, mirror::ArtMethod* accessed)
+void ThrowIllegalAccessErrorMethod(mirror::Class* referrer, ArtMethod* accessed)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
void ThrowIllegalAccessErrorField(mirror::Class* referrer, ArtField* accessed)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
-void ThrowIllegalAccessErrorFinalField(mirror::ArtMethod* referrer, ArtField* accessed)
+void ThrowIllegalAccessErrorFinalField(ArtMethod* referrer, ArtField* accessed)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
void ThrowIllegalAccessError(mirror::Class* referrer, const char* fmt, ...)
@@ -104,16 +104,16 @@ void ThrowIllegalArgumentException(const char* msg)
// IncompatibleClassChangeError
void ThrowIncompatibleClassChangeError(InvokeType expected_type, InvokeType found_type,
- mirror::ArtMethod* method, mirror::ArtMethod* referrer)
+ ArtMethod* method, ArtMethod* referrer)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
-void ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(mirror::ArtMethod* interface_method,
+void ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(ArtMethod* interface_method,
mirror::Object* this_object,
- mirror::ArtMethod* referrer)
+ ArtMethod* referrer)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
void ThrowIncompatibleClassChangeErrorField(ArtField* resolved_field, bool is_static,
- mirror::ArtMethod* referrer)
+ ArtMethod* referrer)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
void ThrowIncompatibleClassChangeError(mirror::Class* referrer, const char* fmt, ...)
@@ -175,7 +175,7 @@ void ThrowNullPointerExceptionForMethodAccess(uint32_t method_idx,
InvokeType type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
-void ThrowNullPointerExceptionForMethodAccess(mirror::ArtMethod* method,
+void ThrowNullPointerExceptionForMethodAccess(ArtMethod* method,
InvokeType type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 728e8e3..24615e2 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -22,6 +22,7 @@
#include "arch/context.h"
#include "art_field-inl.h"
+#include "art_method-inl.h"
#include "base/time_utils.h"
#include "class_linker.h"
#include "class_linker-inl.h"
@@ -32,7 +33,6 @@
#include "gc/space/space-inl.h"
#include "handle_scope.h"
#include "jdwp/object_registry.h"
-#include "mirror/art_method-inl.h"
#include "mirror/class.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
@@ -78,17 +78,17 @@ class AllocRecordStackTraceElement {
}
int32_t LineNumber() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod* method = Method();
+ ArtMethod* method = Method();
DCHECK(method != nullptr);
return method->GetLineNumFromDexPC(DexPc());
}
- mirror::ArtMethod* Method() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ArtMethod* Method() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedObjectAccessUnchecked soa(Thread::Current());
return soa.DecodeMethod(method_);
}
- void SetMethod(mirror::ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetMethod(ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedObjectAccessUnchecked soa(Thread::Current());
method_ = soa.EncodeMethod(m);
}
@@ -184,7 +184,7 @@ class AllocRecord {
class Breakpoint {
public:
- Breakpoint(mirror::ArtMethod* method, uint32_t dex_pc,
+ Breakpoint(ArtMethod* method, uint32_t dex_pc,
DeoptimizationRequest::Kind deoptimization_kind)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
: method_(nullptr), dex_pc_(dex_pc), deoptimization_kind_(deoptimization_kind) {
@@ -202,7 +202,7 @@ class Breakpoint {
method_ = soa.EncodeMethod(other.Method());
}
- mirror::ArtMethod* Method() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ArtMethod* Method() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedObjectAccessUnchecked soa(Thread::Current());
return soa.DecodeMethod(method_);
}
@@ -235,7 +235,7 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati
DebugInstrumentationListener() {}
virtual ~DebugInstrumentationListener() {}
- void MethodEntered(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
+ void MethodEntered(Thread* thread, mirror::Object* this_object, ArtMethod* method,
uint32_t dex_pc)
OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (method->IsNative()) {
@@ -261,7 +261,7 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati
}
}
- void MethodExited(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
+ void MethodExited(Thread* thread, mirror::Object* this_object, ArtMethod* method,
uint32_t dex_pc, const JValue& return_value)
OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (method->IsNative()) {
@@ -279,14 +279,14 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati
}
void MethodUnwind(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object ATTRIBUTE_UNUSED,
- mirror::ArtMethod* method, uint32_t dex_pc)
+ ArtMethod* method, uint32_t dex_pc)
OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// We're not recorded to listen to this kind of event, so complain.
LOG(ERROR) << "Unexpected method unwind event in debugger " << PrettyMethod(method)
<< " " << dex_pc;
}
- void DexPcMoved(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
+ void DexPcMoved(Thread* thread, mirror::Object* this_object, ArtMethod* method,
uint32_t new_dex_pc)
OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (IsListeningToMethodExit() && IsReturn(method, new_dex_pc)) {
@@ -308,13 +308,13 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati
}
void FieldRead(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object,
- mirror::ArtMethod* method, uint32_t dex_pc, ArtField* field)
+ ArtMethod* method, uint32_t dex_pc, ArtField* field)
OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Dbg::PostFieldAccessEvent(method, dex_pc, this_object, field);
}
void FieldWritten(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object,
- mirror::ArtMethod* method, uint32_t dex_pc, ArtField* field,
+ ArtMethod* method, uint32_t dex_pc, ArtField* field,
const JValue& field_value)
OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Dbg::PostFieldModificationEvent(method, dex_pc, this_object, field, &field_value);
@@ -326,14 +326,14 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati
}
// We only care about how many backward branches were executed in the Jit.
- void BackwardBranch(Thread* /*thread*/, mirror::ArtMethod* method, int32_t dex_pc_offset)
+ void BackwardBranch(Thread* /*thread*/, ArtMethod* method, int32_t dex_pc_offset)
OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
LOG(ERROR) << "Unexpected backward branch event in debugger " << PrettyMethod(method)
<< " " << dex_pc_offset;
}
private:
- static bool IsReturn(mirror::ArtMethod* method, uint32_t dex_pc)
+ static bool IsReturn(ArtMethod* method, uint32_t dex_pc)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
const DexFile::CodeItem* code_item = method->GetCodeItem();
const Instruction* instruction = Instruction::At(&code_item->insns_[dex_pc]);
@@ -408,11 +408,6 @@ static std::vector<Breakpoint> gBreakpoints GUARDED_BY(Locks::breakpoint_lock_);
void DebugInvokeReq::VisitRoots(RootVisitor* visitor, const RootInfo& root_info) {
receiver.VisitRootIfNonNull(visitor, root_info); // null for static method call.
klass.VisitRoot(visitor, root_info);
- method.VisitRoot(visitor, root_info);
-}
-
-void SingleStepControl::VisitRoots(RootVisitor* visitor, const RootInfo& root_info) {
- method_.VisitRootIfNonNull(visitor, root_info);
}
void SingleStepControl::AddDexPc(uint32_t dex_pc) {
@@ -423,7 +418,7 @@ bool SingleStepControl::ContainsDexPc(uint32_t dex_pc) const {
return dex_pcs_.find(dex_pc) == dex_pcs_.end();
}
-static bool IsBreakpoint(const mirror::ArtMethod* m, uint32_t dex_pc)
+static bool IsBreakpoint(const ArtMethod* m, uint32_t dex_pc)
LOCKS_EXCLUDED(Locks::breakpoint_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
@@ -1395,9 +1390,8 @@ JDWP::FieldId Dbg::ToFieldId(const ArtField* f) {
return static_cast<JDWP::FieldId>(reinterpret_cast<uintptr_t>(f));
}
-static JDWP::MethodId ToMethodId(const mirror::ArtMethod* m)
+static JDWP::MethodId ToMethodId(const ArtMethod* m)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- CHECK(!kMovingMethods);
return static_cast<JDWP::MethodId>(reinterpret_cast<uintptr_t>(m));
}
@@ -1406,17 +1400,16 @@ static ArtField* FromFieldId(JDWP::FieldId fid)
return reinterpret_cast<ArtField*>(static_cast<uintptr_t>(fid));
}
-static mirror::ArtMethod* FromMethodId(JDWP::MethodId mid)
+static ArtMethod* FromMethodId(JDWP::MethodId mid)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- CHECK(!kMovingMethods);
- return reinterpret_cast<mirror::ArtMethod*>(static_cast<uintptr_t>(mid));
+ return reinterpret_cast<ArtMethod*>(static_cast<uintptr_t>(mid));
}
bool Dbg::MatchThread(JDWP::ObjectId expected_thread_id, Thread* event_thread) {
CHECK(event_thread != nullptr);
JDWP::JdwpError error;
- mirror::Object* expected_thread_peer = gRegistry->Get<mirror::Object*>(expected_thread_id,
- &error);
+ mirror::Object* expected_thread_peer = gRegistry->Get<mirror::Object*>(
+ expected_thread_id, &error);
return expected_thread_peer == event_thread->GetPeer();
}
@@ -1425,7 +1418,7 @@ bool Dbg::MatchLocation(const JDWP::JdwpLocation& expected_location,
if (expected_location.dex_pc != event_location.dex_pc) {
return false;
}
- mirror::ArtMethod* m = FromMethodId(expected_location.method_id);
+ ArtMethod* m = FromMethodId(expected_location.method_id);
return m == event_location.method;
}
@@ -1454,7 +1447,7 @@ bool Dbg::MatchInstance(JDWP::ObjectId expected_instance_id, mirror::Object* eve
return modifier_instance == event_instance;
}
-void Dbg::SetJdwpLocation(JDWP::JdwpLocation* location, mirror::ArtMethod* m, uint32_t dex_pc)
+void Dbg::SetJdwpLocation(JDWP::JdwpLocation* location, ArtMethod* m, uint32_t dex_pc)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
LOCKS_EXCLUDED(Locks::thread_list_lock_,
Locks::thread_suspend_count_lock_) {
@@ -1470,11 +1463,11 @@ void Dbg::SetJdwpLocation(JDWP::JdwpLocation* location, mirror::ArtMethod* m, ui
}
std::string Dbg::GetMethodName(JDWP::MethodId method_id) {
- mirror::ArtMethod* m = FromMethodId(method_id);
+ ArtMethod* m = FromMethodId(method_id);
if (m == nullptr) {
return "null";
}
- return m->GetName();
+ return m->GetInterfaceMethodIfProxy(sizeof(void*))->GetName();
}
std::string Dbg::GetFieldName(JDWP::FieldId field_id) {
@@ -1503,7 +1496,7 @@ static uint32_t MangleAccessFlags(uint32_t accessFlags) {
* expect slots to begin with arguments, but dex code places them at
* the end.
*/
-static uint16_t MangleSlot(uint16_t slot, mirror::ArtMethod* m)
+static uint16_t MangleSlot(uint16_t slot, ArtMethod* m)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
const DexFile::CodeItem* code_item = m->GetCodeItem();
if (code_item == nullptr) {
@@ -1525,14 +1518,14 @@ static uint16_t MangleSlot(uint16_t slot, mirror::ArtMethod* m)
* Circularly shifts registers so that arguments come last. Reverts
* slots to dex style argument placement.
*/
-static uint16_t DemangleSlot(uint16_t slot, mirror::ArtMethod* m, JDWP::JdwpError* error)
+static uint16_t DemangleSlot(uint16_t slot, ArtMethod* m, JDWP::JdwpError* error)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
const DexFile::CodeItem* code_item = m->GetCodeItem();
if (code_item == nullptr) {
// We should not get here for a method without code (native, proxy or abstract). Log it and
// return the slot as is since all registers are arguments.
LOG(WARNING) << "Trying to demangle slot for method without code " << PrettyMethod(m);
- uint16_t vreg_count = mirror::ArtMethod::NumArgRegisters(m->GetShorty());
+ uint16_t vreg_count = ArtMethod::NumArgRegisters(m->GetShorty());
if (slot < vreg_count) {
*error = JDWP::ERR_NONE;
return slot;
@@ -1591,14 +1584,18 @@ JDWP::JdwpError Dbg::OutputDeclaredMethods(JDWP::RefTypeId class_id, bool with_g
expandBufAdd4BE(pReply, direct_method_count + virtual_method_count);
+ auto* cl = Runtime::Current()->GetClassLinker();
+ auto ptr_size = cl->GetImagePointerSize();
for (size_t i = 0; i < direct_method_count + virtual_method_count; ++i) {
- mirror::ArtMethod* m = (i < direct_method_count) ? c->GetDirectMethod(i) : c->GetVirtualMethod(i - direct_method_count);
+ ArtMethod* m = i < direct_method_count ?
+ c->GetDirectMethod(i, ptr_size) : c->GetVirtualMethod(i - direct_method_count, ptr_size);
expandBufAddMethodId(pReply, ToMethodId(m));
- expandBufAddUtf8String(pReply, m->GetName());
- expandBufAddUtf8String(pReply, m->GetSignature().ToString());
+ expandBufAddUtf8String(pReply, m->GetInterfaceMethodIfProxy(sizeof(void*))->GetName());
+ expandBufAddUtf8String(pReply,
+ m->GetInterfaceMethodIfProxy(sizeof(void*))->GetSignature().ToString());
if (with_generic) {
- static const char genericSignature[1] = "";
- expandBufAddUtf8String(pReply, genericSignature);
+ const char* generic_signature = "";
+ expandBufAddUtf8String(pReply, generic_signature);
}
expandBufAdd4BE(pReply, MangleAccessFlags(m->GetAccessFlags()));
}
@@ -1635,7 +1632,7 @@ void Dbg::OutputLineTable(JDWP::RefTypeId, JDWP::MethodId method_id, JDWP::Expan
return false;
}
};
- mirror::ArtMethod* m = FromMethodId(method_id);
+ ArtMethod* m = FromMethodId(method_id);
const DexFile::CodeItem* code_item = m->GetCodeItem();
uint64_t start, end;
if (code_item == nullptr) {
@@ -1670,7 +1667,7 @@ void Dbg::OutputLineTable(JDWP::RefTypeId, JDWP::MethodId method_id, JDWP::Expan
void Dbg::OutputVariableTable(JDWP::RefTypeId, JDWP::MethodId method_id, bool with_generic,
JDWP::ExpandBuf* pReply) {
struct DebugCallbackContext {
- mirror::ArtMethod* method;
+ ArtMethod* method;
JDWP::ExpandBuf* pReply;
size_t variable_count;
bool with_generic;
@@ -1699,12 +1696,12 @@ void Dbg::OutputVariableTable(JDWP::RefTypeId, JDWP::MethodId method_id, bool wi
++pContext->variable_count;
}
};
- mirror::ArtMethod* m = FromMethodId(method_id);
+ ArtMethod* m = FromMethodId(method_id);
// arg_count considers doubles and longs to take 2 units.
// variable_count considers everything to take 1 unit.
std::string shorty(m->GetShorty());
- expandBufAdd4BE(pReply, mirror::ArtMethod::NumArgRegisters(shorty));
+ expandBufAdd4BE(pReply, ArtMethod::NumArgRegisters(shorty));
// We don't know the total number of variables yet, so leave a blank and update it later.
size_t variable_count_offset = expandBufGetLength(pReply);
@@ -1728,7 +1725,7 @@ void Dbg::OutputVariableTable(JDWP::RefTypeId, JDWP::MethodId method_id, bool wi
void Dbg::OutputMethodReturnValue(JDWP::MethodId method_id, const JValue* return_value,
JDWP::ExpandBuf* pReply) {
- mirror::ArtMethod* m = FromMethodId(method_id);
+ ArtMethod* m = FromMethodId(method_id);
JDWP::JdwpTag tag = BasicTagFromDescriptor(m->GetShorty());
OutputJValue(tag, return_value, pReply);
}
@@ -1742,7 +1739,7 @@ void Dbg::OutputFieldValue(JDWP::FieldId field_id, const JValue* field_value,
JDWP::JdwpError Dbg::GetBytecodes(JDWP::RefTypeId, JDWP::MethodId method_id,
std::vector<uint8_t>* bytecodes) {
- mirror::ArtMethod* m = FromMethodId(method_id);
+ ArtMethod* m = FromMethodId(method_id);
if (m == nullptr) {
return JDWP::ERR_INVALID_METHODID;
}
@@ -2470,7 +2467,7 @@ class FindFrameVisitor FINAL : public StackVisitor {
if (GetFrameId() != frame_id_) {
return true; // Not our frame, carry on.
}
- mirror::ArtMethod* m = GetMethod();
+ ArtMethod* m = GetMethod();
if (m->IsNative()) {
// We can't read/write local value from/into native method.
error_ = JDWP::ERR_OPAQUE_FRAME;
@@ -2548,7 +2545,7 @@ static JDWP::JdwpError FailGetLocalValue(const StackVisitor& visitor, uint16_t v
JDWP::JdwpError Dbg::GetLocalValue(const StackVisitor& visitor, ScopedObjectAccessUnchecked& soa,
int slot, JDWP::JdwpTag tag, uint8_t* buf, size_t width) {
- mirror::ArtMethod* m = visitor.GetMethod();
+ ArtMethod* m = visitor.GetMethod();
JDWP::JdwpError error = JDWP::ERR_NONE;
uint16_t vreg = DemangleSlot(slot, m, &error);
if (error != JDWP::ERR_NONE) {
@@ -2711,7 +2708,7 @@ static JDWP::JdwpError FailSetLocalValue(const StackVisitor& visitor, uint16_t v
JDWP::JdwpError Dbg::SetLocalValue(StackVisitor& visitor, int slot, JDWP::JdwpTag tag,
uint64_t value, size_t width) {
- mirror::ArtMethod* m = visitor.GetMethod();
+ ArtMethod* m = visitor.GetMethod();
JDWP::JdwpError error = JDWP::ERR_NONE;
uint16_t vreg = DemangleSlot(slot, m, &error);
if (error != JDWP::ERR_NONE) {
@@ -2786,7 +2783,7 @@ JDWP::JdwpError Dbg::SetLocalValue(StackVisitor& visitor, int slot, JDWP::JdwpTa
return JDWP::ERR_NONE;
}
-static void SetEventLocation(JDWP::EventLocation* location, mirror::ArtMethod* m, uint32_t dex_pc)
+static void SetEventLocation(JDWP::EventLocation* location, ArtMethod* m, uint32_t dex_pc)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(location != nullptr);
if (m == nullptr) {
@@ -2797,7 +2794,7 @@ static void SetEventLocation(JDWP::EventLocation* location, mirror::ArtMethod* m
}
}
-void Dbg::PostLocationEvent(mirror::ArtMethod* m, int dex_pc, mirror::Object* this_object,
+void Dbg::PostLocationEvent(ArtMethod* m, int dex_pc, mirror::Object* this_object,
int event_flags, const JValue* return_value) {
if (!IsDebuggerActive()) {
return;
@@ -2830,7 +2827,7 @@ void Dbg::PostLocationEvent(mirror::ArtMethod* m, int dex_pc, mirror::Object* th
}
}
-void Dbg::PostFieldAccessEvent(mirror::ArtMethod* m, int dex_pc,
+void Dbg::PostFieldAccessEvent(ArtMethod* m, int dex_pc,
mirror::Object* this_object, ArtField* f) {
if (!IsDebuggerActive()) {
return;
@@ -2843,7 +2840,7 @@ void Dbg::PostFieldAccessEvent(mirror::ArtMethod* m, int dex_pc,
gJdwpState->PostFieldEvent(&location, f, this_object, nullptr, false);
}
-void Dbg::PostFieldModificationEvent(mirror::ArtMethod* m, int dex_pc,
+void Dbg::PostFieldModificationEvent(ArtMethod* m, int dex_pc,
mirror::Object* this_object, ArtField* f,
const JValue* field_value) {
if (!IsDebuggerActive()) {
@@ -2871,14 +2868,14 @@ class CatchLocationFinder : public StackVisitor {
exception_(exception),
handle_scope_(self),
this_at_throw_(handle_scope_.NewHandle<mirror::Object>(nullptr)),
- catch_method_(handle_scope_.NewHandle<mirror::ArtMethod>(nullptr)),
- throw_method_(handle_scope_.NewHandle<mirror::ArtMethod>(nullptr)),
+ catch_method_(nullptr),
+ throw_method_(nullptr),
catch_dex_pc_(DexFile::kDexNoIndex),
throw_dex_pc_(DexFile::kDexNoIndex) {
}
bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod* method = GetMethod();
+ ArtMethod* method = GetMethod();
DCHECK(method != nullptr);
if (method->IsRuntimeMethod()) {
// Ignore callee save method.
@@ -2887,25 +2884,23 @@ class CatchLocationFinder : public StackVisitor {
}
uint32_t dex_pc = GetDexPc();
- if (throw_method_.Get() == nullptr) {
+ if (throw_method_ == nullptr) {
// First Java method found. It is either the method that threw the exception,
// or the Java native method that is reporting an exception thrown by
// native code.
this_at_throw_.Assign(GetThisObject());
- throw_method_.Assign(method);
+ throw_method_ = method;
throw_dex_pc_ = dex_pc;
}
if (dex_pc != DexFile::kDexNoIndex) {
- StackHandleScope<2> hs(self_);
+ StackHandleScope<1> hs(self_);
uint32_t found_dex_pc;
Handle<mirror::Class> exception_class(hs.NewHandle(exception_->GetClass()));
- Handle<mirror::ArtMethod> h_method(hs.NewHandle(method));
bool unused_clear_exception;
- found_dex_pc = mirror::ArtMethod::FindCatchBlock(
- h_method, exception_class, dex_pc, &unused_clear_exception);
+ found_dex_pc = method->FindCatchBlock(exception_class, dex_pc, &unused_clear_exception);
if (found_dex_pc != DexFile::kDexNoIndex) {
- catch_method_.Assign(method);
+ catch_method_ = method;
catch_dex_pc_ = found_dex_pc;
return false; // End stack walk.
}
@@ -2913,12 +2908,12 @@ class CatchLocationFinder : public StackVisitor {
return true; // Continue stack walk.
}
- mirror::ArtMethod* GetCatchMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return catch_method_.Get();
+ ArtMethod* GetCatchMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return catch_method_;
}
- mirror::ArtMethod* GetThrowMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return throw_method_.Get();
+ ArtMethod* GetThrowMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return throw_method_;
}
mirror::Object* GetThisAtThrow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -2936,10 +2931,10 @@ class CatchLocationFinder : public StackVisitor {
private:
Thread* const self_;
const Handle<mirror::Throwable>& exception_;
- StackHandleScope<3> handle_scope_;
+ StackHandleScope<1> handle_scope_;
MutableHandle<mirror::Object> this_at_throw_;
- MutableHandle<mirror::ArtMethod> catch_method_;
- MutableHandle<mirror::ArtMethod> throw_method_;
+ ArtMethod* catch_method_;
+ ArtMethod* throw_method_;
uint32_t catch_dex_pc_;
uint32_t throw_dex_pc_;
@@ -2973,7 +2968,7 @@ void Dbg::PostClassPrepare(mirror::Class* c) {
}
void Dbg::UpdateDebugger(Thread* thread, mirror::Object* this_object,
- mirror::ArtMethod* m, uint32_t dex_pc,
+ ArtMethod* m, uint32_t dex_pc,
int event_flags, const JValue* return_value) {
if (!IsDebuggerActive() || dex_pc == static_cast<uint32_t>(-2) /* fake method exit */) {
return;
@@ -3222,7 +3217,7 @@ void Dbg::ManageDeoptimization() {
self->TransitionFromSuspendedToRunnable();
}
-static bool IsMethodPossiblyInlined(Thread* self, mirror::ArtMethod* m)
+static bool IsMethodPossiblyInlined(Thread* self, ArtMethod* m)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
const DexFile::CodeItem* code_item = m->GetCodeItem();
if (code_item == nullptr) {
@@ -3232,19 +3227,18 @@ static bool IsMethodPossiblyInlined(Thread* self, mirror::ArtMethod* m)
}
// Note: method verifier may cause thread suspension.
self->AssertThreadSuspensionIsAllowable();
- StackHandleScope<3> hs(self);
+ StackHandleScope<2> hs(self);
mirror::Class* declaring_class = m->GetDeclaringClass();
Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
- Handle<mirror::ArtMethod> method(hs.NewHandle(m));
verifier::MethodVerifier verifier(self, dex_cache->GetDexFile(), dex_cache, class_loader,
- &m->GetClassDef(), code_item, m->GetDexMethodIndex(), method,
+ &m->GetClassDef(), code_item, m->GetDexMethodIndex(), m,
m->GetAccessFlags(), false, true, false, true);
// Note: we don't need to verify the method.
return InlineMethodAnalyser::AnalyseMethodCode(&verifier, nullptr);
}
-static const Breakpoint* FindFirstBreakpointForMethod(mirror::ArtMethod* m)
+static const Breakpoint* FindFirstBreakpointForMethod(ArtMethod* m)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::breakpoint_lock_) {
for (Breakpoint& breakpoint : gBreakpoints) {
if (breakpoint.Method() == m) {
@@ -3254,13 +3248,13 @@ static const Breakpoint* FindFirstBreakpointForMethod(mirror::ArtMethod* m)
return nullptr;
}
-bool Dbg::MethodHasAnyBreakpoints(mirror::ArtMethod* method) {
+bool Dbg::MethodHasAnyBreakpoints(ArtMethod* method) {
ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
return FindFirstBreakpointForMethod(method) != nullptr;
}
// Sanity checks all existing breakpoints on the same method.
-static void SanityCheckExistingBreakpoints(mirror::ArtMethod* m,
+static void SanityCheckExistingBreakpoints(ArtMethod* m,
DeoptimizationRequest::Kind deoptimization_kind)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::breakpoint_lock_) {
for (const Breakpoint& breakpoint : gBreakpoints) {
@@ -3289,7 +3283,7 @@ static void SanityCheckExistingBreakpoints(mirror::ArtMethod* m,
// If a breakpoint has already been set, we also return the first breakpoint
// through the given 'existing_brkpt' pointer.
static DeoptimizationRequest::Kind GetRequiredDeoptimizationKind(Thread* self,
- mirror::ArtMethod* m,
+ ArtMethod* m,
const Breakpoint** existing_brkpt)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (!Dbg::RequiresDeoptimization()) {
@@ -3353,7 +3347,7 @@ static DeoptimizationRequest::Kind GetRequiredDeoptimizationKind(Thread* self,
// request if we need to deoptimize.
void Dbg::WatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequest* req) {
Thread* const self = Thread::Current();
- mirror::ArtMethod* m = FromMethodId(location->method_id);
+ ArtMethod* m = FromMethodId(location->method_id);
DCHECK(m != nullptr) << "No method for method id " << location->method_id;
const Breakpoint* existing_breakpoint = nullptr;
@@ -3388,7 +3382,7 @@ void Dbg::WatchLocation(const JDWP::JdwpLocation* location, DeoptimizationReques
// request if we need to undeoptimize.
void Dbg::UnwatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequest* req) {
WriterMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
- mirror::ArtMethod* m = FromMethodId(location->method_id);
+ ArtMethod* m = FromMethodId(location->method_id);
DCHECK(m != nullptr) << "No method for method id " << location->method_id;
DeoptimizationRequest::Kind deoptimization_kind = DeoptimizationRequest::kNothing;
for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) {
@@ -3428,7 +3422,7 @@ void Dbg::UnwatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequ
}
}
-bool Dbg::IsForcedInterpreterNeededForCallingImpl(Thread* thread, mirror::ArtMethod* m) {
+bool Dbg::IsForcedInterpreterNeededForCallingImpl(Thread* thread, ArtMethod* m) {
const SingleStepControl* const ssc = thread->GetSingleStepControl();
if (ssc == nullptr) {
// If we are not single-stepping, then we don't have to force interpreter.
@@ -3448,7 +3442,7 @@ bool Dbg::IsForcedInterpreterNeededForCallingImpl(Thread* thread, mirror::ArtMet
return false;
}
-bool Dbg::IsForcedInterpreterNeededForResolutionImpl(Thread* thread, mirror::ArtMethod* m) {
+bool Dbg::IsForcedInterpreterNeededForResolutionImpl(Thread* thread, ArtMethod* m) {
instrumentation::Instrumentation* const instrumentation =
Runtime::Current()->GetInstrumentation();
// If we are in interpreter only mode, then we don't have to force interpreter.
@@ -3482,7 +3476,7 @@ bool Dbg::IsForcedInterpreterNeededForResolutionImpl(Thread* thread, mirror::Art
return instrumentation->IsDeoptimized(m);
}
-bool Dbg::IsForcedInstrumentationNeededForResolutionImpl(Thread* thread, mirror::ArtMethod* m) {
+bool Dbg::IsForcedInstrumentationNeededForResolutionImpl(Thread* thread, ArtMethod* m) {
// The upcall can be null and in that case we don't need to do anything.
if (m == nullptr) {
return false;
@@ -3519,7 +3513,7 @@ bool Dbg::IsForcedInstrumentationNeededForResolutionImpl(Thread* thread, mirror:
return instrumentation->IsDeoptimized(m);
}
-bool Dbg::IsForcedInterpreterNeededForUpcallImpl(Thread* thread, mirror::ArtMethod* m) {
+bool Dbg::IsForcedInterpreterNeededForUpcallImpl(Thread* thread, ArtMethod* m) {
// The upcall can be null and in that case we don't need to do anything.
if (m == nullptr) {
return false;
@@ -3623,7 +3617,7 @@ JDWP::JdwpError Dbg::ConfigureStep(JDWP::ObjectId thread_id, JDWP::JdwpStepSize
// TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
// annotalysis.
bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
- mirror::ArtMethod* m = GetMethod();
+ ArtMethod* m = GetMethod();
if (!m->IsRuntimeMethod()) {
++stack_depth;
if (method == nullptr) {
@@ -3639,7 +3633,7 @@ JDWP::JdwpError Dbg::ConfigureStep(JDWP::ObjectId thread_id, JDWP::JdwpStepSize
}
int stack_depth;
- mirror::ArtMethod* method;
+ ArtMethod* method;
int32_t line_number;
};
@@ -3701,7 +3695,7 @@ JDWP::JdwpError Dbg::ConfigureStep(JDWP::ObjectId thread_id, JDWP::JdwpStepSize
return JDWP::ERR_OUT_OF_MEMORY;
}
- mirror::ArtMethod* m = single_step_control->GetMethod();
+ ArtMethod* m = single_step_control->GetMethod();
const int32_t line_number = visitor.line_number;
// Note: if the thread is not running Java code (pure native thread), there is no "current"
// method on the stack (and no line number either).
@@ -3838,7 +3832,7 @@ JDWP::JdwpError Dbg::InvokeMethod(JDWP::ObjectId thread_id, JDWP::ObjectId objec
return error;
}
- mirror::ArtMethod* m = FromMethodId(method_id);
+ ArtMethod* m = FromMethodId(method_id);
if (m->IsStatic() != (receiver == nullptr)) {
return JDWP::ERR_INVALID_METHODID;
}
@@ -3860,8 +3854,7 @@ JDWP::JdwpError Dbg::InvokeMethod(JDWP::ObjectId thread_id, JDWP::ObjectId objec
}
{
- StackHandleScope<3> hs(soa.Self());
- HandleWrapper<mirror::ArtMethod> h_m(hs.NewHandleWrapper(&m));
+ StackHandleScope<2> hs(soa.Self());
HandleWrapper<mirror::Object> h_obj(hs.NewHandleWrapper(&receiver));
HandleWrapper<mirror::Class> h_klass(hs.NewHandleWrapper(&c));
const DexFile::TypeList* types = m->GetParameterTypeList();
@@ -3873,7 +3866,7 @@ JDWP::JdwpError Dbg::InvokeMethod(JDWP::ObjectId thread_id, JDWP::ObjectId objec
if (shorty[i + 1] == 'L') {
// Did we really get an argument of an appropriate reference type?
mirror::Class* parameter_type =
- h_m->GetClassFromTypeIndex(types->GetTypeItem(i).type_idx_, true);
+ m->GetClassFromTypeIndex(types->GetTypeItem(i).type_idx_, true);
mirror::Object* argument = gRegistry->Get<mirror::Object*>(arg_values[i], &error);
if (error != JDWP::ERR_NONE) {
return JDWP::ERR_INVALID_OBJECT;
@@ -3976,32 +3969,34 @@ void Dbg::ExecuteMethod(DebugInvokeReq* pReq) {
// We can be called while an exception is pending. We need
// to preserve that across the method invocation.
- StackHandleScope<4> hs(soa.Self());
+ StackHandleScope<3> hs(soa.Self());
auto old_exception = hs.NewHandle<mirror::Throwable>(soa.Self()->GetException());
soa.Self()->ClearException();
// Translate the method through the vtable, unless the debugger wants to suppress it.
- MutableHandle<mirror::ArtMethod> m(hs.NewHandle(pReq->method.Read()));
+ auto* m = pReq->method;
+ auto image_pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
if ((pReq->options & JDWP::INVOKE_NONVIRTUAL) == 0 && pReq->receiver.Read() != nullptr) {
- mirror::ArtMethod* actual_method = pReq->klass.Read()->FindVirtualMethodForVirtualOrInterface(m.Get());
- if (actual_method != m.Get()) {
- VLOG(jdwp) << "ExecuteMethod translated " << PrettyMethod(m.Get())
+ ArtMethod* actual_method =
+ pReq->klass.Read()->FindVirtualMethodForVirtualOrInterface(m, image_pointer_size);
+ if (actual_method != m) {
+ VLOG(jdwp) << "ExecuteMethod translated " << PrettyMethod(m)
<< " to " << PrettyMethod(actual_method);
- m.Assign(actual_method);
+ m = actual_method;
}
}
- VLOG(jdwp) << "ExecuteMethod " << PrettyMethod(m.Get())
+ VLOG(jdwp) << "ExecuteMethod " << PrettyMethod(m)
<< " receiver=" << pReq->receiver.Read()
<< " arg_count=" << pReq->arg_count;
- CHECK(m.Get() != nullptr);
+ CHECK(m != nullptr);
CHECK_EQ(sizeof(jvalue), sizeof(uint64_t));
ScopedLocalRef<jobject> ref(soa.Env(), soa.AddLocalReference<jobject>(pReq->receiver.Read()));
- JValue result = InvokeWithJValues(soa, ref.get(), soa.EncodeMethod(m.Get()),
+ JValue result = InvokeWithJValues(soa, ref.get(), soa.EncodeMethod(m),
reinterpret_cast<jvalue*>(pReq->arg_values));
- pReq->result_tag = BasicTagFromDescriptor(m.Get()->GetShorty());
+ pReq->result_tag = BasicTagFromDescriptor(m->GetShorty());
const bool is_object_result = (pReq->result_tag == JDWP::JT_OBJECT);
Handle<mirror::Object> object_result = hs.NewHandle(is_object_result ? result.GetL() : nullptr);
Handle<mirror::Throwable> exception = hs.NewHandle(soa.Self()->GetException());
@@ -4744,7 +4739,7 @@ struct AllocRecordStackVisitor : public StackVisitor {
if (depth >= kMaxAllocRecordStackDepth) {
return false;
}
- mirror::ArtMethod* m = GetMethod();
+ ArtMethod* m = GetMethod();
if (!m->IsRuntimeMethod()) {
record->StackElement(depth)->SetMethod(m);
record->StackElement(depth)->SetDexPc(GetDexPc());
@@ -4828,7 +4823,7 @@ void Dbg::DumpRecentAllocations() {
for (size_t stack_frame = 0; stack_frame < kMaxAllocRecordStackDepth; ++stack_frame) {
AllocRecordStackTraceElement* stack_element = record->StackElement(stack_frame);
- mirror::ArtMethod* m = stack_element->Method();
+ ArtMethod* m = stack_element->Method();
if (m == nullptr) {
break;
}
@@ -4884,7 +4879,7 @@ class StringTable {
DISALLOW_COPY_AND_ASSIGN(StringTable);
};
-static const char* GetMethodSourceFile(mirror::ArtMethod* method)
+static const char* GetMethodSourceFile(ArtMethod* method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(method != nullptr);
const char* source_file = method->GetDeclaringClassSourceFile();
@@ -4957,7 +4952,7 @@ jbyteArray Dbg::GetRecentAllocations() {
std::string temp;
class_names.Add(record->Type()->GetDescriptor(&temp));
for (size_t i = 0; i < kMaxAllocRecordStackDepth; i++) {
- mirror::ArtMethod* m = record->StackElement(i)->Method();
+ ArtMethod* m = record->StackElement(i)->Method();
if (m != nullptr) {
class_names.Add(m->GetDeclaringClassDescriptor());
method_names.Add(m->GetName());
@@ -5019,7 +5014,7 @@ jbyteArray Dbg::GetRecentAllocations() {
// (2b) method name
// (2b) method source file
// (2b) line number, clipped to 32767; -2 if native; -1 if no source
- mirror::ArtMethod* m = record->StackElement(stack_frame)->Method();
+ ArtMethod* m = record->StackElement(stack_frame)->Method();
size_t class_name_index = class_names.IndexOf(m->GetDeclaringClassDescriptor());
size_t method_name_index = method_names.IndexOf(m->GetName());
size_t file_name_index = filenames.IndexOf(GetMethodSourceFile(m));
@@ -5047,12 +5042,12 @@ jbyteArray Dbg::GetRecentAllocations() {
return result;
}
-mirror::ArtMethod* DeoptimizationRequest::Method() const {
+ArtMethod* DeoptimizationRequest::Method() const {
ScopedObjectAccessUnchecked soa(Thread::Current());
return soa.DecodeMethod(method_);
}
-void DeoptimizationRequest::SetMethod(mirror::ArtMethod* m) {
+void DeoptimizationRequest::SetMethod(ArtMethod* m) {
ScopedObjectAccessUnchecked soa(Thread::Current());
method_ = soa.EncodeMethod(m);
}
diff --git a/runtime/debugger.h b/runtime/debugger.h
index 811d345..7c586a4 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -37,13 +37,13 @@
namespace art {
namespace mirror {
-class ArtMethod;
class Class;
class Object;
class Throwable;
} // namespace mirror
class AllocRecord;
class ArtField;
+class ArtMethod;
class ObjectRegistry;
class ScopedObjectAccessUnchecked;
class StackVisitor;
@@ -54,7 +54,7 @@ class Thread;
*/
struct DebugInvokeReq {
DebugInvokeReq(mirror::Object* invoke_receiver, mirror::Class* invoke_class,
- mirror::ArtMethod* invoke_method, uint32_t invoke_options,
+ ArtMethod* invoke_method, uint32_t invoke_options,
uint64_t* args, uint32_t args_count)
: receiver(invoke_receiver), klass(invoke_class), method(invoke_method),
arg_count(args_count), arg_values(args), options(invoke_options),
@@ -66,7 +66,7 @@ struct DebugInvokeReq {
/* request */
GcRoot<mirror::Object> receiver; // not used for ClassType.InvokeMethod
GcRoot<mirror::Class> klass;
- GcRoot<mirror::ArtMethod> method;
+ ArtMethod* method;
const uint32_t arg_count;
uint64_t* const arg_values; // will be null if arg_count_ == 0
const uint32_t options;
@@ -92,7 +92,7 @@ struct DebugInvokeReq {
class SingleStepControl {
public:
SingleStepControl(JDWP::JdwpStepSize step_size, JDWP::JdwpStepDepth step_depth,
- int stack_depth, mirror::ArtMethod* method)
+ int stack_depth, ArtMethod* method)
: step_size_(step_size), step_depth_(step_depth),
stack_depth_(stack_depth), method_(method) {
}
@@ -109,17 +109,14 @@ class SingleStepControl {
return stack_depth_;
}
- mirror::ArtMethod* GetMethod() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return method_.Read();
+ ArtMethod* GetMethod() const {
+ return method_;
}
const std::set<uint32_t>& GetDexPcs() const {
return dex_pcs_;
}
- void VisitRoots(RootVisitor* visitor, const RootInfo& root_info)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
void AddDexPc(uint32_t dex_pc);
bool ContainsDexPc(uint32_t dex_pc) const;
@@ -138,7 +135,8 @@ class SingleStepControl {
// set of DEX pcs associated to the source line number where the suspension occurred.
// This is used to support SD_INTO and SD_OVER single-step depths so we detect when a single-step
// causes the execution of an instruction in a different method or at a different line number.
- GcRoot<mirror::ArtMethod> method_;
+ ArtMethod* method_;
+
std::set<uint32_t> dex_pcs_;
DISALLOW_COPY_AND_ASSIGN(SingleStepControl);
@@ -166,9 +164,9 @@ class DeoptimizationRequest {
SetMethod(other.Method());
}
- mirror::ArtMethod* Method() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtMethod* Method() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SetMethod(mirror::ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetMethod(ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Name 'Kind()' would collide with the above enum name.
Kind GetKind() const {
@@ -256,7 +254,7 @@ class Dbg {
static bool IsJdwpConfigured();
// Returns true if a method has any breakpoints.
- static bool MethodHasAnyBreakpoints(mirror::ArtMethod* method)
+ static bool MethodHasAnyBreakpoints(ArtMethod* method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
LOCKS_EXCLUDED(Locks::breakpoint_lock_);
@@ -524,10 +522,10 @@ class Dbg {
kMethodEntry = 0x04,
kMethodExit = 0x08,
};
- static void PostFieldAccessEvent(mirror::ArtMethod* m, int dex_pc, mirror::Object* this_object,
+ static void PostFieldAccessEvent(ArtMethod* m, int dex_pc, mirror::Object* this_object,
ArtField* f)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static void PostFieldModificationEvent(mirror::ArtMethod* m, int dex_pc,
+ static void PostFieldModificationEvent(ArtMethod* m, int dex_pc,
mirror::Object* this_object, ArtField* f,
const JValue* field_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -541,7 +539,7 @@ class Dbg {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static void UpdateDebugger(Thread* thread, mirror::Object* this_object,
- mirror::ArtMethod* method, uint32_t new_dex_pc,
+ ArtMethod* method, uint32_t new_dex_pc,
int event_flags, const JValue* return_value)
LOCKS_EXCLUDED(Locks::breakpoint_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -574,7 +572,7 @@ class Dbg {
// Indicates whether we need to force the use of interpreter to invoke a method.
// This allows to single-step or continue into the called method.
- static bool IsForcedInterpreterNeededForCalling(Thread* thread, mirror::ArtMethod* m)
+ static bool IsForcedInterpreterNeededForCalling(Thread* thread, ArtMethod* m)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (!IsDebuggerActive()) {
return false;
@@ -585,7 +583,7 @@ class Dbg {
// Indicates whether we need to force the use of interpreter entrypoint when calling a
// method through the resolution trampoline. This allows to single-step or continue into
// the called method.
- static bool IsForcedInterpreterNeededForResolution(Thread* thread, mirror::ArtMethod* m)
+ static bool IsForcedInterpreterNeededForResolution(Thread* thread, ArtMethod* m)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (!IsDebuggerActive()) {
return false;
@@ -596,7 +594,7 @@ class Dbg {
// Indicates whether we need to force the use of instrumentation entrypoint when calling
// a method through the resolution trampoline. This allows to deoptimize the stack for
// debugging when we returned from the called method.
- static bool IsForcedInstrumentationNeededForResolution(Thread* thread, mirror::ArtMethod* m)
+ static bool IsForcedInstrumentationNeededForResolution(Thread* thread, ArtMethod* m)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (!IsDebuggerActive()) {
return false;
@@ -607,7 +605,7 @@ class Dbg {
// Indicates whether we need to force the use of interpreter when returning from the
// interpreter into the runtime. This allows to deoptimize the stack and continue
// execution with interpreter for debugging.
- static bool IsForcedInterpreterNeededForUpcall(Thread* thread, mirror::ArtMethod* m)
+ static bool IsForcedInterpreterNeededForUpcall(Thread* thread, ArtMethod* m)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (!IsDebuggerActive()) {
return false;
@@ -709,7 +707,7 @@ class Dbg {
static JDWP::FieldId ToFieldId(const ArtField* f)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static void SetJdwpLocation(JDWP::JdwpLocation* location, mirror::ArtMethod* m, uint32_t dex_pc)
+ static void SetJdwpLocation(JDWP::JdwpLocation* location, ArtMethod* m, uint32_t dex_pc)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static JDWP::JdwpState* GetJdwpState();
@@ -733,7 +731,7 @@ class Dbg {
static void PostThreadStartOrStop(Thread*, uint32_t)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static void PostLocationEvent(mirror::ArtMethod* method, int pcOffset,
+ static void PostLocationEvent(ArtMethod* method, int pcOffset,
mirror::Object* thisPtr, int eventFlags,
const JValue* return_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -745,16 +743,16 @@ class Dbg {
EXCLUSIVE_LOCKS_REQUIRED(Locks::deoptimization_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static bool IsForcedInterpreterNeededForCallingImpl(Thread* thread, mirror::ArtMethod* m)
+ static bool IsForcedInterpreterNeededForCallingImpl(Thread* thread, ArtMethod* m)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static bool IsForcedInterpreterNeededForResolutionImpl(Thread* thread, mirror::ArtMethod* m)
+ static bool IsForcedInterpreterNeededForResolutionImpl(Thread* thread, ArtMethod* m)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static bool IsForcedInstrumentationNeededForResolutionImpl(Thread* thread, mirror::ArtMethod* m)
+ static bool IsForcedInstrumentationNeededForResolutionImpl(Thread* thread, ArtMethod* m)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static bool IsForcedInterpreterNeededForUpcallImpl(Thread* thread, mirror::ArtMethod* m)
+ static bool IsForcedInterpreterNeededForUpcallImpl(Thread* thread, ArtMethod* m)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static AllocRecord* recent_allocation_records_ PT_GUARDED_BY(Locks::alloc_tracker_lock_);
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index dfe5a04..25d5ef4 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -28,6 +28,7 @@
#include <sstream>
#include "art_field-inl.h"
+#include "art_method-inl.h"
#include "base/logging.h"
#include "base/stringprintf.h"
#include "class_linker.h"
@@ -35,7 +36,6 @@
#include "dex_file_verifier.h"
#include "globals.h"
#include "leb128.h"
-#include "mirror/art_method-inl.h"
#include "mirror/string.h"
#include "os.h"
#include "safe_map.h"
@@ -760,7 +760,7 @@ const Signature DexFile::CreateSignature(const StringPiece& signature) const {
return Signature(this, *proto_id);
}
-int32_t DexFile::GetLineNumFromPC(mirror::ArtMethod* method, uint32_t rel_pc) const {
+int32_t DexFile::GetLineNumFromPC(ArtMethod* method, uint32_t rel_pc) const {
// For native method, lineno should be -2 to indicate it is native. Note that
// "line number == -2" is how libcore tells from StackTraceElement.
if (method->GetCodeItemOffset() == 0) {
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index 84eaa4a..d017601 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -37,11 +37,11 @@ namespace art {
// TODO: remove dependencies on mirror classes, primarily by moving
// EncodedStaticFieldValueIterator to its own file.
namespace mirror {
- class ArtMethod;
class ClassLoader;
class DexCache;
} // namespace mirror
class ArtField;
+class ArtMethod;
class ClassLinker;
class MemMap;
class OatDexFile;
@@ -861,7 +861,7 @@ class DexFile {
// Returns -2 for native methods (as expected in exception traces).
//
// This is used by runtime; therefore use art::Method not art::DexFile::Method.
- int32_t GetLineNumFromPC(mirror::ArtMethod* method, uint32_t rel_pc) const
+ int32_t GetLineNumFromPC(ArtMethod* method, uint32_t rel_pc) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void DecodeDebugInfo(const CodeItem* code_item, bool is_static, uint32_t method_idx,
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 9292cff..a4dd55c 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -19,6 +19,7 @@
#include "entrypoint_utils.h"
+#include "art_method.h"
#include "class_linker-inl.h"
#include "common_throws.h"
#include "dex_file.h"
@@ -27,7 +28,6 @@
#include "indirect_reference_table.h"
#include "invoke_type.h"
#include "jni_internal.h"
-#include "mirror/art_method.h"
#include "mirror/array.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
@@ -38,15 +38,15 @@
namespace art {
-inline mirror::ArtMethod* GetCalleeSaveMethodCaller(Thread* self, Runtime::CalleeSaveType type)
+inline ArtMethod* GetCalleeSaveMethodCaller(Thread* self, Runtime::CalleeSaveType type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- auto* refs_only_sp = self->GetManagedStack()->GetTopQuickFrame();
- DCHECK_EQ(refs_only_sp->AsMirrorPtr(), Runtime::Current()->GetCalleeSaveMethod(type));
+ auto** refs_only_sp = self->GetManagedStack()->GetTopQuickFrame();
+ DCHECK_EQ(*refs_only_sp, Runtime::Current()->GetCalleeSaveMethod(type));
const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, type);
- auto* caller_sp = reinterpret_cast<StackReference<mirror::ArtMethod>*>(
- reinterpret_cast<uintptr_t>(refs_only_sp) + callee_frame_size);
- auto* caller = caller_sp->AsMirrorPtr();
+ auto** caller_sp = reinterpret_cast<ArtMethod**>(
+ reinterpret_cast<uintptr_t>(refs_only_sp) + callee_frame_size);
+ auto* caller = *caller_sp;
if (kIsDebugBuild) {
NthCallerVisitor visitor(self, 1, true);
@@ -60,7 +60,7 @@ inline mirror::ArtMethod* GetCalleeSaveMethodCaller(Thread* self, Runtime::Calle
template <const bool kAccessCheck>
ALWAYS_INLINE
inline mirror::Class* CheckObjectAlloc(uint32_t type_idx,
- mirror::ArtMethod* method,
+ ArtMethod* method,
Thread* self, bool* slow_path) {
mirror::Class* klass = method->GetDexCacheResolvedType<false>(type_idx);
if (UNLIKELY(klass == nullptr)) {
@@ -141,7 +141,7 @@ inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror::Class* klass,
template <bool kAccessCheck, bool kInstrumented>
ALWAYS_INLINE
inline mirror::Object* AllocObjectFromCode(uint32_t type_idx,
- mirror::ArtMethod* method,
+ ArtMethod* method,
Thread* self,
gc::AllocatorType allocator_type) {
bool slow_path = false;
@@ -193,7 +193,7 @@ template <bool kAccessCheck>
ALWAYS_INLINE
inline mirror::Class* CheckArrayAlloc(uint32_t type_idx,
int32_t component_count,
- mirror::ArtMethod* method,
+ ArtMethod* method,
bool* slow_path) {
if (UNLIKELY(component_count < 0)) {
ThrowNegativeArraySizeException(component_count);
@@ -229,7 +229,7 @@ template <bool kAccessCheck, bool kInstrumented>
ALWAYS_INLINE
inline mirror::Array* AllocArrayFromCode(uint32_t type_idx,
int32_t component_count,
- mirror::ArtMethod* method,
+ ArtMethod* method,
Thread* self,
gc::AllocatorType allocator_type) {
bool slow_path = false;
@@ -252,7 +252,7 @@ template <bool kAccessCheck, bool kInstrumented>
ALWAYS_INLINE
inline mirror::Array* AllocArrayFromCodeResolved(mirror::Class* klass,
int32_t component_count,
- mirror::ArtMethod* method,
+ ArtMethod* method,
Thread* self,
gc::AllocatorType allocator_type) {
DCHECK(klass != nullptr);
@@ -274,7 +274,7 @@ inline mirror::Array* AllocArrayFromCodeResolved(mirror::Class* klass,
}
template<FindFieldType type, bool access_check>
-inline ArtField* FindFieldFromCode(uint32_t field_idx, mirror::ArtMethod* referrer,
+inline ArtField* FindFieldFromCode(uint32_t field_idx, ArtMethod* referrer,
Thread* self, size_t expected_size) {
bool is_primitive;
bool is_set;
@@ -347,8 +347,8 @@ inline ArtField* FindFieldFromCode(uint32_t field_idx, mirror::ArtMethod* referr
#define EXPLICIT_FIND_FIELD_FROM_CODE_TEMPLATE_DECL(_type, _access_check) \
template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE \
ArtField* FindFieldFromCode<_type, _access_check>(uint32_t field_idx, \
- mirror::ArtMethod* referrer, \
- Thread* self, size_t expected_size) \
+ ArtMethod* referrer, \
+ Thread* self, size_t expected_size) \
#define EXPLICIT_FIND_FIELD_FROM_CODE_TYPED_TEMPLATE_DECL(_type) \
EXPLICIT_FIND_FIELD_FROM_CODE_TEMPLATE_DECL(_type, false); \
@@ -367,17 +367,16 @@ EXPLICIT_FIND_FIELD_FROM_CODE_TYPED_TEMPLATE_DECL(StaticPrimitiveWrite);
#undef EXPLICIT_FIND_FIELD_FROM_CODE_TEMPLATE_DECL
template<InvokeType type, bool access_check>
-inline mirror::ArtMethod* FindMethodFromCode(uint32_t method_idx,
- mirror::Object** this_object,
- mirror::ArtMethod** referrer, Thread* self) {
+inline ArtMethod* FindMethodFromCode(uint32_t method_idx, mirror::Object** this_object,
+ ArtMethod** referrer, Thread* self) {
ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
- mirror::ArtMethod* resolved_method = class_linker->GetResolvedMethod(method_idx, *referrer);
+ ArtMethod* resolved_method = class_linker->GetResolvedMethod(method_idx, *referrer);
if (resolved_method == nullptr) {
StackHandleScope<1> hs(self);
mirror::Object* null_this = nullptr;
HandleWrapper<mirror::Object> h_this(
hs.NewHandleWrapper(type == kStatic ? &null_this : this_object));
- resolved_method = class_linker->ResolveMethod(self, method_idx, referrer, type);
+ resolved_method = class_linker->ResolveMethod(self, method_idx, *referrer, type);
}
if (UNLIKELY(resolved_method == nullptr)) {
DCHECK(self->IsExceptionPending()); // Throw exception and unwind.
@@ -420,7 +419,7 @@ inline mirror::ArtMethod* FindMethodFromCode(uint32_t method_idx,
return nullptr; // Failure.
}
DCHECK(klass->HasVTable()) << PrettyClass(klass);
- return klass->GetVTableEntry(vtable_index);
+ return klass->GetVTableEntry(vtable_index, class_linker->GetImagePointerSize());
}
case kSuper: {
mirror::Class* super_class = (*referrer)->GetDeclaringClass()->GetSuperClass();
@@ -439,23 +438,25 @@ inline mirror::ArtMethod* FindMethodFromCode(uint32_t method_idx,
DCHECK(super_class != nullptr);
}
DCHECK(super_class->HasVTable());
- return super_class->GetVTableEntry(vtable_index);
+ return super_class->GetVTableEntry(vtable_index, class_linker->GetImagePointerSize());
}
case kInterface: {
uint32_t imt_index = resolved_method->GetDexMethodIndex() % mirror::Class::kImtSize;
- mirror::ArtMethod* imt_method = (*this_object)->GetClass()->GetEmbeddedImTableEntry(imt_index);
+ ArtMethod* imt_method = (*this_object)->GetClass()->GetEmbeddedImTableEntry(
+ imt_index, class_linker->GetImagePointerSize());
if (!imt_method->IsImtConflictMethod() && !imt_method->IsImtUnimplementedMethod()) {
if (kIsDebugBuild) {
mirror::Class* klass = (*this_object)->GetClass();
- mirror::ArtMethod* method = klass->FindVirtualMethodForInterface(resolved_method);
+ ArtMethod* method = klass->FindVirtualMethodForInterface(
+ resolved_method, class_linker->GetImagePointerSize());
CHECK_EQ(imt_method, method) << PrettyMethod(resolved_method) << " / " <<
PrettyMethod(imt_method) << " / " << PrettyMethod(method) << " / " <<
PrettyClass(klass);
}
return imt_method;
} else {
- mirror::ArtMethod* interface_method =
- (*this_object)->GetClass()->FindVirtualMethodForInterface(resolved_method);
+ ArtMethod* interface_method = (*this_object)->GetClass()->FindVirtualMethodForInterface(
+ resolved_method, class_linker->GetImagePointerSize());
if (UNLIKELY(interface_method == nullptr)) {
ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(resolved_method,
*this_object, *referrer);
@@ -473,10 +474,10 @@ inline mirror::ArtMethod* FindMethodFromCode(uint32_t method_idx,
// Explicit template declarations of FindMethodFromCode for all invoke types.
#define EXPLICIT_FIND_METHOD_FROM_CODE_TEMPLATE_DECL(_type, _access_check) \
template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE \
- mirror::ArtMethod* FindMethodFromCode<_type, _access_check>(uint32_t method_idx, \
- mirror::Object** this_object, \
- mirror::ArtMethod** referrer, \
- Thread* self)
+ ArtMethod* FindMethodFromCode<_type, _access_check>(uint32_t method_idx, \
+ mirror::Object** this_object, \
+ ArtMethod** referrer, \
+ Thread* self)
#define EXPLICIT_FIND_METHOD_FROM_CODE_TYPED_TEMPLATE_DECL(_type) \
EXPLICIT_FIND_METHOD_FROM_CODE_TEMPLATE_DECL(_type, false); \
EXPLICIT_FIND_METHOD_FROM_CODE_TEMPLATE_DECL(_type, true)
@@ -491,9 +492,8 @@ EXPLICIT_FIND_METHOD_FROM_CODE_TYPED_TEMPLATE_DECL(kInterface);
#undef EXPLICIT_FIND_METHOD_FROM_CODE_TEMPLATE_DECL
// Fast path field resolution that can't initialize classes or throw exceptions.
-inline ArtField* FindFieldFast(uint32_t field_idx,
- mirror::ArtMethod* referrer,
- FindFieldType type, size_t expected_size) {
+inline ArtField* FindFieldFast(uint32_t field_idx, ArtMethod* referrer, FindFieldType type,
+ size_t expected_size) {
ArtField* resolved_field =
referrer->GetDeclaringClass()->GetDexCache()->GetResolvedField(field_idx, sizeof(void*));
if (UNLIKELY(resolved_field == nullptr)) {
@@ -530,8 +530,7 @@ inline ArtField* FindFieldFast(uint32_t field_idx,
}
mirror::Class* referring_class = referrer->GetDeclaringClass();
if (UNLIKELY(!referring_class->CanAccess(fields_class) ||
- !referring_class->CanAccessMember(fields_class,
- resolved_field->GetAccessFlags()) ||
+ !referring_class->CanAccessMember(fields_class, resolved_field->GetAccessFlags()) ||
(is_set && resolved_field->IsFinal() && (fields_class != referring_class)))) {
// Illegal access.
return nullptr;
@@ -544,15 +543,13 @@ inline ArtField* FindFieldFast(uint32_t field_idx,
}
// Fast path method resolution that can't throw exceptions.
-inline mirror::ArtMethod* FindMethodFast(uint32_t method_idx,
- mirror::Object* this_object,
- mirror::ArtMethod* referrer,
- bool access_check, InvokeType type) {
+inline ArtMethod* FindMethodFast(uint32_t method_idx, mirror::Object* this_object,
+ ArtMethod* referrer, bool access_check, InvokeType type) {
if (UNLIKELY(this_object == nullptr && type != kStatic)) {
return nullptr;
}
- mirror::ArtMethod* resolved_method =
- referrer->GetDeclaringClass()->GetDexCache()->GetResolvedMethod(method_idx);
+ ArtMethod* resolved_method =
+ referrer->GetDeclaringClass()->GetDexCache()->GetResolvedMethod(method_idx, sizeof(void*));
if (UNLIKELY(resolved_method == nullptr)) {
return nullptr;
}
@@ -572,22 +569,21 @@ inline mirror::ArtMethod* FindMethodFast(uint32_t method_idx,
}
}
if (type == kInterface) { // Most common form of slow path dispatch.
- return this_object->GetClass()->FindVirtualMethodForInterface(resolved_method);
+ return this_object->GetClass()->FindVirtualMethodForInterface(resolved_method, sizeof(void*));
} else if (type == kStatic || type == kDirect) {
return resolved_method;
} else if (type == kSuper) {
- return referrer->GetDeclaringClass()->GetSuperClass()
- ->GetVTableEntry(resolved_method->GetMethodIndex());
+ return referrer->GetDeclaringClass()->GetSuperClass()->GetVTableEntry(
+ resolved_method->GetMethodIndex(), sizeof(void*));
} else {
DCHECK(type == kVirtual);
- return this_object->GetClass()->GetVTableEntry(resolved_method->GetMethodIndex());
+ return this_object->GetClass()->GetVTableEntry(
+ resolved_method->GetMethodIndex(), sizeof(void*));
}
}
-inline mirror::Class* ResolveVerifyAndClinit(uint32_t type_idx,
- mirror::ArtMethod* referrer,
- Thread* self, bool can_run_clinit,
- bool verify_access) {
+inline mirror::Class* ResolveVerifyAndClinit(uint32_t type_idx, ArtMethod* referrer, Thread* self,
+ bool can_run_clinit, bool verify_access) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
mirror::Class* klass = class_linker->ResolveType(type_idx, referrer);
if (UNLIKELY(klass == nullptr)) {
@@ -620,8 +616,7 @@ inline mirror::Class* ResolveVerifyAndClinit(uint32_t type_idx,
return h_class.Get();
}
-inline mirror::String* ResolveStringFromCode(mirror::ArtMethod* referrer,
- uint32_t string_idx) {
+inline mirror::String* ResolveStringFromCode(ArtMethod* referrer, uint32_t string_idx) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
return class_linker->ResolveString(string_idx, referrer);
}
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index ce56739..fc7f8b7 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -17,11 +17,11 @@
#include "entrypoints/entrypoint_utils.h"
#include "art_field-inl.h"
+#include "art_method-inl.h"
#include "base/mutex.h"
#include "class_linker-inl.h"
#include "dex_file-inl.h"
#include "gc/accounting/card_table-inl.h"
-#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/method.h"
#include "mirror/object-inl.h"
@@ -35,7 +35,7 @@ namespace art {
static inline mirror::Class* CheckFilledNewArrayAlloc(uint32_t type_idx,
int32_t component_count,
- mirror::ArtMethod* referrer,
+ ArtMethod* referrer,
Thread* self,
bool access_check)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -76,7 +76,7 @@ static inline mirror::Class* CheckFilledNewArrayAlloc(uint32_t type_idx,
// Helper function to allocate array for FILLED_NEW_ARRAY.
mirror::Array* CheckAndAllocArrayFromCode(uint32_t type_idx, int32_t component_count,
- mirror::ArtMethod* referrer, Thread* self,
+ ArtMethod* referrer, Thread* self,
bool access_check,
gc::AllocatorType /* allocator_type */) {
mirror::Class* klass = CheckFilledNewArrayAlloc(type_idx, component_count, referrer, self,
@@ -96,7 +96,7 @@ mirror::Array* CheckAndAllocArrayFromCode(uint32_t type_idx, int32_t component_c
// Helper function to allocate array for FILLED_NEW_ARRAY.
mirror::Array* CheckAndAllocArrayFromCodeInstrumented(uint32_t type_idx,
int32_t component_count,
- mirror::ArtMethod* referrer,
+ ArtMethod* referrer,
Thread* self,
bool access_check,
gc::AllocatorType /* allocator_type */) {
@@ -294,22 +294,19 @@ JValue InvokeProxyInvocationHandler(ScopedObjectAccessAlreadyRunnable& soa, cons
mirror::Object* rcvr = soa.Decode<mirror::Object*>(rcvr_jobj);
mirror::Class* proxy_class = rcvr->GetClass();
mirror::Method* interface_method = soa.Decode<mirror::Method*>(interface_method_jobj);
- mirror::ArtMethod* proxy_method =
- rcvr->GetClass()->FindVirtualMethodForInterface(interface_method->GetArtMethod());
- int throws_index = -1;
- size_t num_virt_methods = proxy_class->NumVirtualMethods();
- for (size_t i = 0; i < num_virt_methods; i++) {
- if (proxy_class->GetVirtualMethod(i) == proxy_method) {
- throws_index = i;
- break;
- }
- }
- CHECK_NE(throws_index, -1);
+ ArtMethod* proxy_method = rcvr->GetClass()->FindVirtualMethodForInterface(
+ interface_method->GetArtMethod(), sizeof(void*));
+ auto* virtual_methods = proxy_class->GetVirtualMethodsPtr();
+ size_t num_virtuals = proxy_class->NumVirtualMethods();
+ size_t method_size = ArtMethod::ObjectSize(sizeof(void*));
+ int throws_index = (reinterpret_cast<uintptr_t>(proxy_method) -
+ reinterpret_cast<uintptr_t>(virtual_methods)) / method_size;
+ CHECK_LT(throws_index, static_cast<int>(num_virtuals));
mirror::ObjectArray<mirror::Class>* declared_exceptions =
proxy_class->GetThrows()->Get(throws_index);
mirror::Class* exception_class = exception->GetClass();
bool declares_exception = false;
- for (int i = 0; i < declared_exceptions->GetLength() && !declares_exception; i++) {
+ for (int32_t i = 0; i < declared_exceptions->GetLength() && !declares_exception; i++) {
mirror::Class* declared_exception = declared_exceptions->Get(i);
declares_exception = declared_exception->IsAssignableFrom(exception_class);
}
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index 8d419f8..47865a2 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -31,19 +31,19 @@ namespace art {
namespace mirror {
class Array;
- class ArtMethod;
class Class;
class Object;
class String;
} // namespace mirror
class ArtField;
+class ArtMethod;
class ScopedObjectAccessAlreadyRunnable;
class Thread;
template <const bool kAccessCheck>
ALWAYS_INLINE inline mirror::Class* CheckObjectAlloc(uint32_t type_idx,
- mirror::ArtMethod* method,
+ ArtMethod* method,
Thread* self, bool* slow_path)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -58,7 +58,7 @@ ALWAYS_INLINE inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror::
// check.
template <bool kAccessCheck, bool kInstrumented>
ALWAYS_INLINE inline mirror::Object* AllocObjectFromCode(uint32_t type_idx,
- mirror::ArtMethod* method,
+ ArtMethod* method,
Thread* self,
gc::AllocatorType allocator_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -81,7 +81,7 @@ ALWAYS_INLINE inline mirror::Object* AllocObjectFromCodeInitialized(mirror::Clas
template <bool kAccessCheck>
ALWAYS_INLINE inline mirror::Class* CheckArrayAlloc(uint32_t type_idx,
int32_t component_count,
- mirror::ArtMethod* method,
+ ArtMethod* method,
bool* slow_path)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -92,7 +92,7 @@ ALWAYS_INLINE inline mirror::Class* CheckArrayAlloc(uint32_t type_idx,
template <bool kAccessCheck, bool kInstrumented>
ALWAYS_INLINE inline mirror::Array* AllocArrayFromCode(uint32_t type_idx,
int32_t component_count,
- mirror::ArtMethod* method,
+ ArtMethod* method,
Thread* self,
gc::AllocatorType allocator_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -100,20 +100,20 @@ ALWAYS_INLINE inline mirror::Array* AllocArrayFromCode(uint32_t type_idx,
template <bool kAccessCheck, bool kInstrumented>
ALWAYS_INLINE inline mirror::Array* AllocArrayFromCodeResolved(mirror::Class* klass,
int32_t component_count,
- mirror::ArtMethod* method,
+ ArtMethod* method,
Thread* self,
gc::AllocatorType allocator_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
extern mirror::Array* CheckAndAllocArrayFromCode(uint32_t type_idx, int32_t component_count,
- mirror::ArtMethod* method, Thread* self,
+ ArtMethod* method, Thread* self,
bool access_check,
gc::AllocatorType allocator_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
extern mirror::Array* CheckAndAllocArrayFromCodeInstrumented(uint32_t type_idx,
int32_t component_count,
- mirror::ArtMethod* method,
+ ArtMethod* method,
Thread* self,
bool access_check,
gc::AllocatorType allocator_type)
@@ -132,38 +132,33 @@ enum FindFieldType {
};
template<FindFieldType type, bool access_check>
-inline ArtField* FindFieldFromCode(uint32_t field_idx, mirror::ArtMethod* referrer,
- Thread* self, size_t expected_size)
+inline ArtField* FindFieldFromCode(
+ uint32_t field_idx, ArtMethod* referrer, Thread* self, size_t expected_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template<InvokeType type, bool access_check>
-inline mirror::ArtMethod* FindMethodFromCode(uint32_t method_idx,
- mirror::Object** this_object,
- mirror::ArtMethod** referrer, Thread* self)
+inline ArtMethod* FindMethodFromCode(
+ uint32_t method_idx, mirror::Object** this_object, ArtMethod** referrer, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Fast path field resolution that can't initialize classes or throw exceptions.
-inline ArtField* FindFieldFast(uint32_t field_idx,
- mirror::ArtMethod* referrer,
- FindFieldType type, size_t expected_size)
+inline ArtField* FindFieldFast(
+ uint32_t field_idx, ArtMethod* referrer, FindFieldType type, size_t expected_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Fast path method resolution that can't throw exceptions.
-inline mirror::ArtMethod* FindMethodFast(uint32_t method_idx,
- mirror::Object* this_object,
- mirror::ArtMethod* referrer,
- bool access_check, InvokeType type)
+inline ArtMethod* FindMethodFast(
+ uint32_t method_idx, mirror::Object* this_object, ArtMethod* referrer, bool access_check,
+ InvokeType type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-inline mirror::Class* ResolveVerifyAndClinit(uint32_t type_idx,
- mirror::ArtMethod* referrer,
- Thread* self, bool can_run_clinit,
- bool verify_access)
+inline mirror::Class* ResolveVerifyAndClinit(
+ uint32_t type_idx, ArtMethod* referrer, Thread* self, bool can_run_clinit, bool verify_access)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
extern void ThrowStackOverflowError(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-inline mirror::String* ResolveStringFromCode(mirror::ArtMethod* referrer, uint32_t string_idx)
+inline mirror::String* ResolveStringFromCode(ArtMethod* referrer, uint32_t string_idx)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// TODO: annotalysis disabled as monitor semantics are maintained in Java code.
diff --git a/runtime/entrypoints/interpreter/interpreter_entrypoints.cc b/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
index d4844c2..72c2e0a 100644
--- a/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
+++ b/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
@@ -14,10 +14,10 @@
* limitations under the License.
*/
+#include "art_method-inl.h"
#include "class_linker.h"
#include "dex_file-inl.h"
#include "interpreter/interpreter.h"
-#include "mirror/art_method-inl.h"
#include "mirror/object-inl.h"
#include "reflection.h"
#include "runtime.h"
@@ -27,7 +27,7 @@ namespace art {
extern "C" void artInterpreterToCompiledCodeBridge(Thread* self, const DexFile::CodeItem* code_item,
ShadowFrame* shadow_frame, JValue* result) {
- mirror::ArtMethod* method = shadow_frame->GetMethod();
+ ArtMethod* method = shadow_frame->GetMethod();
// Ensure static methods are initialized.
if (method->IsStatic()) {
mirror::Class* declaringClass = method->GetDeclaringClass();
@@ -50,7 +50,7 @@ extern "C" void artInterpreterToCompiledCodeBridge(Thread* self, const DexFile::
uint16_t arg_offset = (code_item == nullptr) ? 0 : code_item->registers_size_ - code_item->ins_size_;
method->Invoke(self, shadow_frame->GetVRegArgs(arg_offset),
(shadow_frame->NumberOfVRegs() - arg_offset) * sizeof(uint32_t),
- result, method->GetShorty());
+ result, method->GetInterfaceMethodIfProxy(sizeof(void*))->GetShorty());
}
} // namespace art
diff --git a/runtime/entrypoints/jni/jni_entrypoints.cc b/runtime/entrypoints/jni/jni_entrypoints.cc
index a68eeeb..22226c1 100644
--- a/runtime/entrypoints/jni/jni_entrypoints.cc
+++ b/runtime/entrypoints/jni/jni_entrypoints.cc
@@ -14,9 +14,9 @@
* limitations under the License.
*/
+#include "art_method-inl.h"
#include "base/logging.h"
#include "entrypoints/entrypoint_utils.h"
-#include "mirror/art_method-inl.h"
#include "mirror/object-inl.h"
#include "scoped_thread_state_change.h"
#include "thread.h"
@@ -34,7 +34,7 @@ extern "C" void* artFindNativeMethod(Thread* self) {
Locks::mutator_lock_->AssertNotHeld(self); // We come here as Native.
ScopedObjectAccess soa(self);
- mirror::ArtMethod* method = self->GetCurrentMethod(nullptr);
+ ArtMethod* method = self->GetCurrentMethod(nullptr);
DCHECK(method != nullptr);
// Lookup symbol address for method, on failure we'll return null with an exception set,
diff --git a/runtime/entrypoints/quick/callee_save_frame.h b/runtime/entrypoints/quick/callee_save_frame.h
index 8cd6ca6..521c549 100644
--- a/runtime/entrypoints/quick/callee_save_frame.h
+++ b/runtime/entrypoints/quick/callee_save_frame.h
@@ -32,9 +32,7 @@
#include "arch/x86_64/quick_method_frame_info_x86_64.h"
namespace art {
-namespace mirror {
class ArtMethod;
-} // namespace mirror
class ScopedQuickEntrypointChecks {
public:
diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
index fa129af..f56b5e4 100644
--- a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
@@ -16,9 +16,9 @@
#include "entrypoints/quick/quick_alloc_entrypoints.h"
+#include "art_method-inl.h"
#include "callee_save_frame.h"
#include "entrypoints/entrypoint_utils-inl.h"
-#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
@@ -29,7 +29,7 @@ static constexpr bool kUseTlabFastPath = true;
#define GENERATE_ENTRYPOINTS_FOR_ALLOCATOR_INST(suffix, suffix2, instrumented_bool, allocator_type) \
extern "C" mirror::Object* artAllocObjectFromCode ##suffix##suffix2( \
- uint32_t type_idx, mirror::ArtMethod* method, Thread* self) \
+ uint32_t type_idx, ArtMethod* method, Thread* self) \
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
if (kUseTlabFastPath && !instrumented_bool && allocator_type == gc::kAllocatorTypeTLAB) { \
@@ -56,7 +56,7 @@ extern "C" mirror::Object* artAllocObjectFromCode ##suffix##suffix2( \
return AllocObjectFromCode<false, instrumented_bool>(type_idx, method, self, allocator_type); \
} \
extern "C" mirror::Object* artAllocObjectFromCodeResolved##suffix##suffix2( \
- mirror::Class* klass, mirror::ArtMethod* method, Thread* self) \
+ mirror::Class* klass, ArtMethod* method, Thread* self) \
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
UNUSED(method); \
ScopedQuickEntrypointChecks sqec(self); \
@@ -83,7 +83,7 @@ extern "C" mirror::Object* artAllocObjectFromCodeResolved##suffix##suffix2( \
return AllocObjectFromCodeResolved<instrumented_bool>(klass, self, allocator_type); \
} \
extern "C" mirror::Object* artAllocObjectFromCodeInitialized##suffix##suffix2( \
- mirror::Class* klass, mirror::ArtMethod* method, Thread* self) \
+ mirror::Class* klass, ArtMethod* method, Thread* self) \
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
UNUSED(method); \
ScopedQuickEntrypointChecks sqec(self); \
@@ -108,34 +108,34 @@ extern "C" mirror::Object* artAllocObjectFromCodeInitialized##suffix##suffix2( \
return AllocObjectFromCodeInitialized<instrumented_bool>(klass, self, allocator_type); \
} \
extern "C" mirror::Object* artAllocObjectFromCodeWithAccessCheck##suffix##suffix2( \
- uint32_t type_idx, mirror::ArtMethod* method, Thread* self) \
+ uint32_t type_idx, ArtMethod* method, Thread* self) \
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
return AllocObjectFromCode<true, instrumented_bool>(type_idx, method, self, allocator_type); \
} \
extern "C" mirror::Array* artAllocArrayFromCode##suffix##suffix2( \
- uint32_t type_idx, int32_t component_count, mirror::ArtMethod* method, Thread* self) \
+ uint32_t type_idx, int32_t component_count, ArtMethod* method, Thread* self) \
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
return AllocArrayFromCode<false, instrumented_bool>(type_idx, component_count, method, self, \
allocator_type); \
} \
extern "C" mirror::Array* artAllocArrayFromCodeResolved##suffix##suffix2( \
- mirror::Class* klass, int32_t component_count, mirror::ArtMethod* method, Thread* self) \
+ mirror::Class* klass, int32_t component_count, ArtMethod* method, Thread* self) \
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
return AllocArrayFromCodeResolved<false, instrumented_bool>(klass, component_count, method, self, \
allocator_type); \
} \
extern "C" mirror::Array* artAllocArrayFromCodeWithAccessCheck##suffix##suffix2( \
- uint32_t type_idx, int32_t component_count, mirror::ArtMethod* method, Thread* self) \
+ uint32_t type_idx, int32_t component_count, ArtMethod* method, Thread* self) \
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
return AllocArrayFromCode<true, instrumented_bool>(type_idx, component_count, method, self, \
allocator_type); \
} \
extern "C" mirror::Array* artCheckAndAllocArrayFromCode##suffix##suffix2( \
- uint32_t type_idx, int32_t component_count, mirror::ArtMethod* method, Thread* self) \
+ uint32_t type_idx, int32_t component_count, ArtMethod* method, Thread* self) \
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
if (!instrumented_bool) { \
@@ -145,7 +145,7 @@ extern "C" mirror::Array* artCheckAndAllocArrayFromCode##suffix##suffix2( \
} \
} \
extern "C" mirror::Array* artCheckAndAllocArrayFromCodeWithAccessCheck##suffix##suffix2( \
- uint32_t type_idx, int32_t component_count, mirror::ArtMethod* method, Thread* self) \
+ uint32_t type_idx, int32_t component_count, ArtMethod* method, Thread* self) \
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
if (!instrumented_bool) { \
@@ -193,27 +193,27 @@ GENERATE_ENTRYPOINTS_FOR_ALLOCATOR(Region, gc::kAllocatorTypeRegion)
GENERATE_ENTRYPOINTS_FOR_ALLOCATOR(RegionTLAB, gc::kAllocatorTypeRegionTLAB)
#define GENERATE_ENTRYPOINTS(suffix) \
-extern "C" void* art_quick_alloc_array##suffix(uint32_t, int32_t, mirror::ArtMethod* ref); \
-extern "C" void* art_quick_alloc_array_resolved##suffix(mirror::Class* klass, int32_t, mirror::ArtMethod* ref); \
-extern "C" void* art_quick_alloc_array_with_access_check##suffix(uint32_t, int32_t, mirror::ArtMethod* ref); \
-extern "C" void* art_quick_alloc_object##suffix(uint32_t type_idx, mirror::ArtMethod* ref); \
-extern "C" void* art_quick_alloc_object_resolved##suffix(mirror::Class* klass, mirror::ArtMethod* ref); \
-extern "C" void* art_quick_alloc_object_initialized##suffix(mirror::Class* klass, mirror::ArtMethod* ref); \
-extern "C" void* art_quick_alloc_object_with_access_check##suffix(uint32_t type_idx, mirror::ArtMethod* ref); \
-extern "C" void* art_quick_check_and_alloc_array##suffix(uint32_t, int32_t, mirror::ArtMethod* ref); \
-extern "C" void* art_quick_check_and_alloc_array_with_access_check##suffix(uint32_t, int32_t, mirror::ArtMethod* ref); \
+extern "C" void* art_quick_alloc_array##suffix(uint32_t, int32_t, ArtMethod* ref); \
+extern "C" void* art_quick_alloc_array_resolved##suffix(mirror::Class* klass, int32_t, ArtMethod* ref); \
+extern "C" void* art_quick_alloc_array_with_access_check##suffix(uint32_t, int32_t, ArtMethod* ref); \
+extern "C" void* art_quick_alloc_object##suffix(uint32_t type_idx, ArtMethod* ref); \
+extern "C" void* art_quick_alloc_object_resolved##suffix(mirror::Class* klass, ArtMethod* ref); \
+extern "C" void* art_quick_alloc_object_initialized##suffix(mirror::Class* klass, ArtMethod* ref); \
+extern "C" void* art_quick_alloc_object_with_access_check##suffix(uint32_t type_idx, ArtMethod* ref); \
+extern "C" void* art_quick_check_and_alloc_array##suffix(uint32_t, int32_t, ArtMethod* ref); \
+extern "C" void* art_quick_check_and_alloc_array_with_access_check##suffix(uint32_t, int32_t, ArtMethod* ref); \
extern "C" void* art_quick_alloc_string_from_bytes##suffix(void*, int32_t, int32_t, int32_t); \
extern "C" void* art_quick_alloc_string_from_chars##suffix(int32_t, int32_t, void*); \
extern "C" void* art_quick_alloc_string_from_string##suffix(void*); \
-extern "C" void* art_quick_alloc_array##suffix##_instrumented(uint32_t, int32_t, mirror::ArtMethod* ref); \
-extern "C" void* art_quick_alloc_array_resolved##suffix##_instrumented(mirror::Class* klass, int32_t, mirror::ArtMethod* ref); \
-extern "C" void* art_quick_alloc_array_with_access_check##suffix##_instrumented(uint32_t, int32_t, mirror::ArtMethod* ref); \
-extern "C" void* art_quick_alloc_object##suffix##_instrumented(uint32_t type_idx, mirror::ArtMethod* ref); \
-extern "C" void* art_quick_alloc_object_resolved##suffix##_instrumented(mirror::Class* klass, mirror::ArtMethod* ref); \
-extern "C" void* art_quick_alloc_object_initialized##suffix##_instrumented(mirror::Class* klass, mirror::ArtMethod* ref); \
-extern "C" void* art_quick_alloc_object_with_access_check##suffix##_instrumented(uint32_t type_idx, mirror::ArtMethod* ref); \
-extern "C" void* art_quick_check_and_alloc_array##suffix##_instrumented(uint32_t, int32_t, mirror::ArtMethod* ref); \
-extern "C" void* art_quick_check_and_alloc_array_with_access_check##suffix##_instrumented(uint32_t, int32_t, mirror::ArtMethod* ref); \
+extern "C" void* art_quick_alloc_array##suffix##_instrumented(uint32_t, int32_t, ArtMethod* ref); \
+extern "C" void* art_quick_alloc_array_resolved##suffix##_instrumented(mirror::Class* klass, int32_t, ArtMethod* ref); \
+extern "C" void* art_quick_alloc_array_with_access_check##suffix##_instrumented(uint32_t, int32_t, ArtMethod* ref); \
+extern "C" void* art_quick_alloc_object##suffix##_instrumented(uint32_t type_idx, ArtMethod* ref); \
+extern "C" void* art_quick_alloc_object_resolved##suffix##_instrumented(mirror::Class* klass, ArtMethod* ref); \
+extern "C" void* art_quick_alloc_object_initialized##suffix##_instrumented(mirror::Class* klass, ArtMethod* ref); \
+extern "C" void* art_quick_alloc_object_with_access_check##suffix##_instrumented(uint32_t type_idx, ArtMethod* ref); \
+extern "C" void* art_quick_check_and_alloc_array##suffix##_instrumented(uint32_t, int32_t, ArtMethod* ref); \
+extern "C" void* art_quick_check_and_alloc_array_with_access_check##suffix##_instrumented(uint32_t, int32_t, ArtMethod* ref); \
extern "C" void* art_quick_alloc_string_from_bytes##suffix##_instrumented(void*, int32_t, int32_t, int32_t); \
extern "C" void* art_quick_alloc_string_from_chars##suffix##_instrumented(int32_t, int32_t, void*); \
extern "C" void* art_quick_alloc_string_from_string##suffix##_instrumented(void*); \
diff --git a/runtime/entrypoints/quick/quick_default_externs.h b/runtime/entrypoints/quick/quick_default_externs.h
index 1fd8a949a..c7aaa20 100644
--- a/runtime/entrypoints/quick/quick_default_externs.h
+++ b/runtime/entrypoints/quick/quick_default_externs.h
@@ -22,10 +22,10 @@
namespace art {
namespace mirror {
class Array;
-class ArtMethod;
class Class;
class Object;
} // namespace mirror
+class ArtMethod;
} // namespace art
// These are extern declarations of assembly stubs with common names.
@@ -97,9 +97,9 @@ extern "C" int32_t art_quick_string_compareto(void*, void*);
extern "C" void* art_quick_memcpy(void*, const void*, size_t);
// Invoke entrypoints.
-extern "C" void art_quick_imt_conflict_trampoline(art::mirror::ArtMethod*);
-extern "C" void art_quick_resolution_trampoline(art::mirror::ArtMethod*);
-extern "C" void art_quick_to_interpreter_bridge(art::mirror::ArtMethod*);
+extern "C" void art_quick_imt_conflict_trampoline(art::ArtMethod*);
+extern "C" void art_quick_resolution_trampoline(art::ArtMethod*);
+extern "C" void art_quick_to_interpreter_bridge(art::ArtMethod*);
extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*);
extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*);
extern "C" void art_quick_invoke_static_trampoline_with_access_check(uint32_t, void*);
diff --git a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
index 6a8aaf2..3eefeef 100644
--- a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
@@ -17,7 +17,6 @@
#include "callee_save_frame.h"
#include "dex_file-inl.h"
#include "interpreter/interpreter.h"
-#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
diff --git a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
index 46629f5..67649d4 100644
--- a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
@@ -14,12 +14,12 @@
* limitations under the License.
*/
+#include "art_method-inl.h"
#include "callee_save_frame.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "class_linker-inl.h"
#include "dex_file-inl.h"
#include "gc/accounting/card_table-inl.h"
-#include "mirror/art_method-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
diff --git a/runtime/entrypoints/quick/quick_entrypoints.h b/runtime/entrypoints/quick/quick_entrypoints.h
index b72ce34..cef2510 100644
--- a/runtime/entrypoints/quick/quick_entrypoints.h
+++ b/runtime/entrypoints/quick/quick_entrypoints.h
@@ -29,13 +29,13 @@ namespace art {
namespace mirror {
class Array;
-class ArtMethod;
class Class;
class Object;
template<class MirrorType>
class CompressedReference;
} // namespace mirror
+class ArtMethod;
class Thread;
// Pointers to functions that are called by quick compiler generated code via thread-local storage.
diff --git a/runtime/entrypoints/quick/quick_entrypoints_list.h b/runtime/entrypoints/quick/quick_entrypoints_list.h
index 0aca58f..60bbf4a 100644
--- a/runtime/entrypoints/quick/quick_entrypoints_list.h
+++ b/runtime/entrypoints/quick/quick_entrypoints_list.h
@@ -20,15 +20,15 @@
// All quick entrypoints. Format is name, return type, argument types.
#define QUICK_ENTRYPOINT_LIST(V) \
- V(AllocArray, void*, uint32_t, int32_t, mirror::ArtMethod*) \
- V(AllocArrayResolved, void*, mirror::Class*, int32_t, mirror::ArtMethod*) \
- V(AllocArrayWithAccessCheck, void*, uint32_t, int32_t, mirror::ArtMethod*) \
- V(AllocObject, void*, uint32_t, mirror::ArtMethod*) \
- V(AllocObjectResolved, void*, mirror::Class*, mirror::ArtMethod*) \
- V(AllocObjectInitialized, void*, mirror::Class*, mirror::ArtMethod*) \
- V(AllocObjectWithAccessCheck, void*, uint32_t, mirror::ArtMethod*) \
- V(CheckAndAllocArray, void*, uint32_t, int32_t, mirror::ArtMethod*) \
- V(CheckAndAllocArrayWithAccessCheck, void*, uint32_t, int32_t, mirror::ArtMethod*) \
+ V(AllocArray, void*, uint32_t, int32_t, ArtMethod*) \
+ V(AllocArrayResolved, void*, mirror::Class*, int32_t, ArtMethod*) \
+ V(AllocArrayWithAccessCheck, void*, uint32_t, int32_t, ArtMethod*) \
+ V(AllocObject, void*, uint32_t, ArtMethod*) \
+ V(AllocObjectResolved, void*, mirror::Class*, ArtMethod*) \
+ V(AllocObjectInitialized, void*, mirror::Class*, ArtMethod*) \
+ V(AllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*) \
+ V(CheckAndAllocArray, void*, uint32_t, int32_t, ArtMethod*) \
+ V(CheckAndAllocArrayWithAccessCheck, void*, uint32_t, int32_t, ArtMethod*) \
V(AllocStringFromBytes, void*, void*, int32_t, int32_t, int32_t) \
V(AllocStringFromChars, void*, int32_t, int32_t, void*) \
V(AllocStringFromString, void*, void*) \
@@ -77,7 +77,7 @@
V(JniMethodEndSynchronized, void, uint32_t, jobject, Thread*) \
V(JniMethodEndWithReference, mirror::Object*, jobject, uint32_t, Thread*) \
V(JniMethodEndWithReferenceSynchronized, mirror::Object*, jobject, uint32_t, jobject, Thread*) \
- V(QuickGenericJniTrampoline, void, mirror::ArtMethod*) \
+ V(QuickGenericJniTrampoline, void, ArtMethod*) \
\
V(LockObject, void, mirror::Object*) \
V(UnlockObject, void, mirror::Object*) \
@@ -106,9 +106,9 @@
V(StringCompareTo, int32_t, void*, void*) \
V(Memcpy, void*, void*, const void*, size_t) \
\
- V(QuickImtConflictTrampoline, void, mirror::ArtMethod*) \
- V(QuickResolutionTrampoline, void, mirror::ArtMethod*) \
- V(QuickToInterpreterBridge, void, mirror::ArtMethod*) \
+ V(QuickImtConflictTrampoline, void, ArtMethod*) \
+ V(QuickResolutionTrampoline, void, ArtMethod*) \
+ V(QuickToInterpreterBridge, void, ArtMethod*) \
V(InvokeDirectTrampolineWithAccessCheck, void, uint32_t, void*) \
V(InvokeInterfaceTrampolineWithAccessCheck, void, uint32_t, void*) \
V(InvokeStaticTrampolineWithAccessCheck, void, uint32_t, void*) \
diff --git a/runtime/entrypoints/quick/quick_field_entrypoints.cc b/runtime/entrypoints/quick/quick_field_entrypoints.cc
index b5a7c09..871cf3c 100644
--- a/runtime/entrypoints/quick/quick_field_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_field_entrypoints.cc
@@ -15,17 +15,17 @@
*/
#include "art_field-inl.h"
+#include "art_method-inl.h"
#include "callee_save_frame.h"
#include "dex_file-inl.h"
#include "entrypoints/entrypoint_utils-inl.h"
-#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include <stdint.h>
namespace art {
-extern "C" int8_t artGetByteStaticFromCode(uint32_t field_idx, mirror::ArtMethod* referrer,
+extern "C" int8_t artGetByteStaticFromCode(uint32_t field_idx, ArtMethod* referrer,
Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
@@ -40,7 +40,7 @@ extern "C" int8_t artGetByteStaticFromCode(uint32_t field_idx, mirror::ArtMethod
return 0; // Will throw exception by checking with Thread::Current.
}
-extern "C" uint8_t artGetBooleanStaticFromCode(uint32_t field_idx, mirror::ArtMethod* referrer,
+extern "C" uint8_t artGetBooleanStaticFromCode(uint32_t field_idx, ArtMethod* referrer,
Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
@@ -55,7 +55,7 @@ extern "C" uint8_t artGetBooleanStaticFromCode(uint32_t field_idx, mirror::ArtMe
return 0; // Will throw exception by checking with Thread::Current.
}
-extern "C" int16_t artGetShortStaticFromCode(uint32_t field_idx, mirror::ArtMethod* referrer,
+extern "C" int16_t artGetShortStaticFromCode(uint32_t field_idx, ArtMethod* referrer,
Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
@@ -71,7 +71,7 @@ extern "C" int16_t artGetShortStaticFromCode(uint32_t field_idx, mirror::ArtMeth
}
extern "C" uint16_t artGetCharStaticFromCode(uint32_t field_idx,
- mirror::ArtMethod* referrer,
+ ArtMethod* referrer,
Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
@@ -87,7 +87,7 @@ extern "C" uint16_t artGetCharStaticFromCode(uint32_t field_idx,
}
extern "C" uint32_t artGet32StaticFromCode(uint32_t field_idx,
- mirror::ArtMethod* referrer,
+ ArtMethod* referrer,
Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
@@ -103,7 +103,7 @@ extern "C" uint32_t artGet32StaticFromCode(uint32_t field_idx,
}
extern "C" uint64_t artGet64StaticFromCode(uint32_t field_idx,
- mirror::ArtMethod* referrer,
+ ArtMethod* referrer,
Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
@@ -119,7 +119,7 @@ extern "C" uint64_t artGet64StaticFromCode(uint32_t field_idx,
}
extern "C" mirror::Object* artGetObjStaticFromCode(uint32_t field_idx,
- mirror::ArtMethod* referrer,
+ ArtMethod* referrer,
Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
@@ -137,7 +137,7 @@ extern "C" mirror::Object* artGetObjStaticFromCode(uint32_t field_idx,
}
extern "C" int8_t artGetByteInstanceFromCode(uint32_t field_idx, mirror::Object* obj,
- mirror::ArtMethod* referrer, Thread* self)
+ ArtMethod* referrer, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int8_t));
@@ -157,7 +157,7 @@ extern "C" int8_t artGetByteInstanceFromCode(uint32_t field_idx, mirror::Object*
}
extern "C" uint8_t artGetBooleanInstanceFromCode(uint32_t field_idx, mirror::Object* obj,
- mirror::ArtMethod* referrer, Thread* self)
+ ArtMethod* referrer, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int8_t));
@@ -176,7 +176,7 @@ extern "C" uint8_t artGetBooleanInstanceFromCode(uint32_t field_idx, mirror::Obj
return 0; // Will throw exception by checking with Thread::Current.
}
extern "C" int16_t artGetShortInstanceFromCode(uint32_t field_idx, mirror::Object* obj,
- mirror::ArtMethod* referrer, Thread* self)
+ ArtMethod* referrer, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int16_t));
@@ -196,7 +196,7 @@ extern "C" int16_t artGetShortInstanceFromCode(uint32_t field_idx, mirror::Objec
}
extern "C" uint16_t artGetCharInstanceFromCode(uint32_t field_idx, mirror::Object* obj,
- mirror::ArtMethod* referrer, Thread* self)
+ ArtMethod* referrer, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int16_t));
@@ -216,7 +216,7 @@ extern "C" uint16_t artGetCharInstanceFromCode(uint32_t field_idx, mirror::Objec
}
extern "C" uint32_t artGet32InstanceFromCode(uint32_t field_idx, mirror::Object* obj,
- mirror::ArtMethod* referrer, Thread* self)
+ ArtMethod* referrer, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int32_t));
@@ -236,7 +236,7 @@ extern "C" uint32_t artGet32InstanceFromCode(uint32_t field_idx, mirror::Object*
}
extern "C" uint64_t artGet64InstanceFromCode(uint32_t field_idx, mirror::Object* obj,
- mirror::ArtMethod* referrer, Thread* self)
+ ArtMethod* referrer, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int64_t));
@@ -256,7 +256,7 @@ extern "C" uint64_t artGet64InstanceFromCode(uint32_t field_idx, mirror::Object*
}
extern "C" mirror::Object* artGetObjInstanceFromCode(uint32_t field_idx, mirror::Object* obj,
- mirror::ArtMethod* referrer,
+ ArtMethod* referrer,
Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
@@ -278,7 +278,7 @@ extern "C" mirror::Object* artGetObjInstanceFromCode(uint32_t field_idx, mirror:
}
extern "C" int artSet8StaticFromCode(uint32_t field_idx, uint32_t new_value,
- mirror::ArtMethod* referrer, Thread* self)
+ ArtMethod* referrer, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int8_t));
@@ -309,7 +309,7 @@ extern "C" int artSet8StaticFromCode(uint32_t field_idx, uint32_t new_value,
}
extern "C" int artSet16StaticFromCode(uint32_t field_idx, uint16_t new_value,
- mirror::ArtMethod* referrer, Thread* self)
+ ArtMethod* referrer, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int16_t));
@@ -340,7 +340,7 @@ extern "C" int artSet16StaticFromCode(uint32_t field_idx, uint16_t new_value,
}
extern "C" int artSet32StaticFromCode(uint32_t field_idx, uint32_t new_value,
- mirror::ArtMethod* referrer, Thread* self)
+ ArtMethod* referrer, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int32_t));
@@ -358,7 +358,7 @@ extern "C" int artSet32StaticFromCode(uint32_t field_idx, uint32_t new_value,
return -1; // failure
}
-extern "C" int artSet64StaticFromCode(uint32_t field_idx, mirror::ArtMethod* referrer,
+extern "C" int artSet64StaticFromCode(uint32_t field_idx, ArtMethod* referrer,
uint64_t new_value, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
@@ -378,7 +378,7 @@ extern "C" int artSet64StaticFromCode(uint32_t field_idx, mirror::ArtMethod* ref
}
extern "C" int artSetObjStaticFromCode(uint32_t field_idx, mirror::Object* new_value,
- mirror::ArtMethod* referrer, Thread* self)
+ ArtMethod* referrer, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, StaticObjectWrite,
@@ -401,7 +401,7 @@ extern "C" int artSetObjStaticFromCode(uint32_t field_idx, mirror::Object* new_v
}
extern "C" int artSet8InstanceFromCode(uint32_t field_idx, mirror::Object* obj, uint8_t new_value,
- mirror::ArtMethod* referrer, Thread* self)
+ ArtMethod* referrer, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int8_t));
@@ -440,7 +440,7 @@ extern "C" int artSet8InstanceFromCode(uint32_t field_idx, mirror::Object* obj,
}
extern "C" int artSet16InstanceFromCode(uint32_t field_idx, mirror::Object* obj, uint16_t new_value,
- mirror::ArtMethod* referrer, Thread* self)
+ ArtMethod* referrer, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int16_t));
@@ -480,7 +480,7 @@ extern "C" int artSet16InstanceFromCode(uint32_t field_idx, mirror::Object* obj,
}
extern "C" int artSet32InstanceFromCode(uint32_t field_idx, mirror::Object* obj, uint32_t new_value,
- mirror::ArtMethod* referrer, Thread* self)
+ ArtMethod* referrer, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int32_t));
@@ -508,7 +508,7 @@ extern "C" int artSet32InstanceFromCode(uint32_t field_idx, mirror::Object* obj,
}
extern "C" int artSet64InstanceFromCode(uint32_t field_idx, mirror::Object* obj, uint64_t new_value,
- mirror::ArtMethod* referrer, Thread* self)
+ ArtMethod* referrer, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int64_t));
@@ -533,7 +533,7 @@ extern "C" int artSet64InstanceFromCode(uint32_t field_idx, mirror::Object* obj,
extern "C" int artSetObjInstanceFromCode(uint32_t field_idx, mirror::Object* obj,
mirror::Object* new_value,
- mirror::ArtMethod* referrer, Thread* self)
+ ArtMethod* referrer, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstanceObjectWrite,
diff --git a/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc b/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc
index e336543..d3991cd 100644
--- a/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc
@@ -14,9 +14,9 @@
* limitations under the License.
*/
+#include "art_method-inl.h"
#include "callee_save_frame.h"
#include "mirror/array.h"
-#include "mirror/art_method-inl.h"
#include "entrypoints/entrypoint_utils.h"
namespace art {
@@ -25,7 +25,7 @@ namespace art {
* Handle fill array data by copying appropriate part of dex file into array.
*/
extern "C" int artHandleFillArrayDataFromCode(uint32_t payload_offset, mirror::Array* array,
- mirror::ArtMethod* method, Thread* self)
+ ArtMethod* method, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
const uint16_t* const insns = method->GetCodeItem()->insns_;
diff --git a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
index eb1b105..7eb73c3 100644
--- a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
@@ -14,17 +14,17 @@
* limitations under the License.
*/
+#include "art_method-inl.h"
#include "callee_save_frame.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "instrumentation.h"
-#include "mirror/art_method-inl.h"
#include "mirror/object-inl.h"
#include "runtime.h"
#include "thread-inl.h"
namespace art {
-extern "C" const void* artInstrumentationMethodEntryFromCode(mirror::ArtMethod* method,
+extern "C" const void* artInstrumentationMethodEntryFromCode(ArtMethod* method,
mirror::Object* this_object,
Thread* self,
uintptr_t lr)
@@ -45,8 +45,7 @@ extern "C" const void* artInstrumentationMethodEntryFromCode(mirror::ArtMethod*
return result;
}
-extern "C" TwoWordReturn artInstrumentationMethodExitFromCode(Thread* self,
- StackReference<mirror::ArtMethod>* sp,
+extern "C" TwoWordReturn artInstrumentationMethodExitFromCode(Thread* self, ArtMethod** sp,
uint64_t gpr_result,
uint64_t fpr_result)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
index 51817a2..de225ad 100644
--- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
@@ -14,8 +14,8 @@
* limitations under the License.
*/
+#include "art_method-inl.h"
#include "entrypoints/entrypoint_utils-inl.h"
-#include "mirror/art_method-inl.h"
#include "mirror/object-inl.h"
#include "thread-inl.h"
#include "verify_object-inl.h"
@@ -35,7 +35,7 @@ extern uint32_t JniMethodStart(Thread* self) {
DCHECK(env != nullptr);
uint32_t saved_local_ref_cookie = env->local_ref_cookie;
env->local_ref_cookie = env->locals.GetSegmentState();
- mirror::ArtMethod* native_method = self->GetManagedStack()->GetTopQuickFrame()->AsMirrorPtr();
+ ArtMethod* native_method = *self->GetManagedStack()->GetTopQuickFrame();
if (!native_method->IsFastNative()) {
// When not fast JNI we transition out of runnable.
self->TransitionFromRunnableToSuspended(kNative);
@@ -50,7 +50,7 @@ extern uint32_t JniMethodStartSynchronized(jobject to_lock, Thread* self) {
// TODO: NO_THREAD_SAFETY_ANALYSIS due to different control paths depending on fast JNI.
static void GoToRunnable(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
- mirror::ArtMethod* native_method = self->GetManagedStack()->GetTopQuickFrame()->AsMirrorPtr();
+ ArtMethod* native_method = *self->GetManagedStack()->GetTopQuickFrame();
bool is_fast = native_method->IsFastNative();
if (!is_fast) {
self->TransitionFromSuspendedToRunnable();
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 2e7e2df..bc15cc7 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include "art_method-inl.h"
#include "callee_save_frame.h"
#include "common_throws.h"
#include "dex_file-inl.h"
@@ -23,7 +24,6 @@
#include "gc/accounting/card_table-inl.h"
#include "interpreter/interpreter.h"
#include "method_reference.h"
-#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache-inl.h"
#include "mirror/method.h"
@@ -279,10 +279,10 @@ class QuickArgumentVisitor {
// 'this' object is the 1st argument. They also have the same frame layout as the
// kRefAndArgs runtime method. Since 'this' is a reference, it is located in the
// 1st GPR.
- static mirror::Object* GetProxyThisObject(StackReference<mirror::ArtMethod>* sp)
+ static mirror::Object* GetProxyThisObject(ArtMethod** sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- CHECK(sp->AsMirrorPtr()->IsProxyMethod());
- CHECK_EQ(kQuickCalleeSaveFrame_RefAndArgs_FrameSize, sp->AsMirrorPtr()->GetFrameSizeInBytes());
+ CHECK((*sp)->IsProxyMethod());
+ CHECK_EQ(kQuickCalleeSaveFrame_RefAndArgs_FrameSize, (*sp)->GetFrameSizeInBytes());
CHECK_GT(kNumQuickGprArgs, 0u);
constexpr uint32_t kThisGprIndex = 0u; // 'this' is in the 1st GPR.
size_t this_arg_offset = kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset +
@@ -291,28 +291,28 @@ class QuickArgumentVisitor {
return reinterpret_cast<StackReference<mirror::Object>*>(this_arg_address)->AsMirrorPtr();
}
- static mirror::ArtMethod* GetCallingMethod(StackReference<mirror::ArtMethod>* sp)
+ static ArtMethod* GetCallingMethod(ArtMethod** sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(sp->AsMirrorPtr()->IsCalleeSaveMethod());
- uint8_t* previous_sp = reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize;
- return reinterpret_cast<StackReference<mirror::ArtMethod>*>(previous_sp)->AsMirrorPtr();
+ DCHECK((*sp)->IsCalleeSaveMethod());
+ uint8_t* previous_sp = reinterpret_cast<uint8_t*>(sp) +
+ kQuickCalleeSaveFrame_RefAndArgs_FrameSize;
+ return *reinterpret_cast<ArtMethod**>(previous_sp);
}
// For the given quick ref and args quick frame, return the caller's PC.
- static uintptr_t GetCallingPc(StackReference<mirror::ArtMethod>* sp)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(sp->AsMirrorPtr()->IsCalleeSaveMethod());
+ static uintptr_t GetCallingPc(ArtMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ DCHECK((*sp)->IsCalleeSaveMethod());
uint8_t* lr = reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_LrOffset;
return *reinterpret_cast<uintptr_t*>(lr);
}
- QuickArgumentVisitor(StackReference<mirror::ArtMethod>* sp, bool is_static, const char* shorty,
+ QuickArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty,
uint32_t shorty_len) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
is_static_(is_static), shorty_(shorty), shorty_len_(shorty_len),
gpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset),
fpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset),
stack_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize
- + sizeof(StackReference<mirror::ArtMethod>)), // Skip StackReference<ArtMethod>.
+ + sizeof(ArtMethod*)), // Skip ArtMethod*.
gpr_index_(0), fpr_index_(0), fpr_double_index_(0), stack_index_(0),
cur_type_(Primitive::kPrimVoid), is_split_long_or_double_(false) {
static_assert(kQuickSoftFloatAbi == (kNumQuickFprArgs == 0),
@@ -323,6 +323,7 @@ class QuickArgumentVisitor {
// next register is even.
static_assert(!kQuickDoubleRegAlignedFloatBackFilled || kNumQuickFprArgs % 2 == 0,
"Number of Quick FPR arguments not even");
+ DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), sizeof(void*));
}
virtual ~QuickArgumentVisitor() {}
@@ -354,7 +355,8 @@ class QuickArgumentVisitor {
}
bool IsSplitLongOrDouble() const {
- if ((GetBytesPerGprSpillLocation(kRuntimeISA) == 4) || (GetBytesPerFprSpillLocation(kRuntimeISA) == 4)) {
+ if ((GetBytesPerGprSpillLocation(kRuntimeISA) == 4) ||
+ (GetBytesPerFprSpillLocation(kRuntimeISA) == 4)) {
return is_split_long_or_double_;
} else {
return false; // An optimization for when GPR and FPRs are 64bit.
@@ -539,7 +541,7 @@ class QuickArgumentVisitor {
// Returns the 'this' object of a proxy method. This function is only used by StackVisitor. It
// allows to use the QuickArgumentVisitor constants without moving all the code in its own module.
-extern "C" mirror::Object* artQuickGetProxyThisObject(StackReference<mirror::ArtMethod>* sp)
+extern "C" mirror::Object* artQuickGetProxyThisObject(ArtMethod** sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return QuickArgumentVisitor::GetProxyThisObject(sp);
}
@@ -547,9 +549,8 @@ extern "C" mirror::Object* artQuickGetProxyThisObject(StackReference<mirror::Art
// Visits arguments on the stack placing them into the shadow frame.
class BuildQuickShadowFrameVisitor FINAL : public QuickArgumentVisitor {
public:
- BuildQuickShadowFrameVisitor(StackReference<mirror::ArtMethod>* sp, bool is_static,
- const char* shorty, uint32_t shorty_len, ShadowFrame* sf,
- size_t first_arg_reg) :
+ BuildQuickShadowFrameVisitor(ArtMethod** sp, bool is_static, const char* shorty,
+ uint32_t shorty_len, ShadowFrame* sf, size_t first_arg_reg) :
QuickArgumentVisitor(sp, is_static, shorty, shorty_len), sf_(sf), cur_reg_(first_arg_reg) {}
void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
@@ -594,8 +595,7 @@ void BuildQuickShadowFrameVisitor::Visit() {
++cur_reg_;
}
-extern "C" uint64_t artQuickToInterpreterBridge(mirror::ArtMethod* method, Thread* self,
- StackReference<mirror::ArtMethod>* sp)
+extern "C" uint64_t artQuickToInterpreterBridge(ArtMethod* method, Thread* self, ArtMethod** sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Ensure we don't get thread suspension until the object arguments are safely in the shadow
// frame.
@@ -616,7 +616,8 @@ extern "C" uint64_t artQuickToInterpreterBridge(mirror::ArtMethod* method, Threa
ShadowFrame* shadow_frame(ShadowFrame::Create(num_regs, nullptr, method, 0, memory));
size_t first_arg_reg = code_item->registers_size_ - code_item->ins_size_;
uint32_t shorty_len = 0;
- const char* shorty = method->GetShorty(&shorty_len);
+ auto* non_proxy_method = method->GetInterfaceMethodIfProxy(sizeof(void*));
+ const char* shorty = non_proxy_method->GetShorty(&shorty_len);
BuildQuickShadowFrameVisitor shadow_frame_builder(sp, method->IsStatic(), shorty, shorty_len,
shadow_frame, first_arg_reg);
shadow_frame_builder.VisitArguments();
@@ -643,7 +644,7 @@ extern "C" uint64_t artQuickToInterpreterBridge(mirror::ArtMethod* method, Threa
self->PopManagedStackFragment(fragment);
// Request a stack deoptimization if needed
- mirror::ArtMethod* caller = QuickArgumentVisitor::GetCallingMethod(sp);
+ ArtMethod* caller = QuickArgumentVisitor::GetCallingMethod(sp);
if (UNLIKELY(Dbg::IsForcedInterpreterNeededForUpcall(self, caller))) {
self->SetException(Thread::GetDeoptimizationException());
self->SetDeoptimizationReturnValue(result);
@@ -658,8 +659,7 @@ extern "C" uint64_t artQuickToInterpreterBridge(mirror::ArtMethod* method, Threa
// to jobjects.
class BuildQuickArgumentVisitor FINAL : public QuickArgumentVisitor {
public:
- BuildQuickArgumentVisitor(StackReference<mirror::ArtMethod>* sp, bool is_static,
- const char* shorty, uint32_t shorty_len,
+ BuildQuickArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty, uint32_t shorty_len,
ScopedObjectAccessUnchecked* soa, std::vector<jvalue>* args) :
QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa), args_(args) {}
@@ -722,9 +722,8 @@ void BuildQuickArgumentVisitor::FixupReferences() {
// which is responsible for recording callee save registers. We explicitly place into jobjects the
// incoming reference arguments (so they survive GC). We invoke the invocation handler, which is a
// field within the proxy object, which will box the primitive arguments and deal with error cases.
-extern "C" uint64_t artQuickProxyInvokeHandler(mirror::ArtMethod* proxy_method,
- mirror::Object* receiver,
- Thread* self, StackReference<mirror::ArtMethod>* sp)
+extern "C" uint64_t artQuickProxyInvokeHandler(
+ ArtMethod* proxy_method, mirror::Object* receiver, Thread* self, ArtMethod** sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(proxy_method->IsProxyMethod()) << PrettyMethod(proxy_method);
DCHECK(receiver->GetClass()->IsProxyClass()) << PrettyMethod(proxy_method);
@@ -732,7 +731,7 @@ extern "C" uint64_t artQuickProxyInvokeHandler(mirror::ArtMethod* proxy_method,
const char* old_cause =
self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments");
// Register the top of the managed stack, making stack crawlable.
- DCHECK_EQ(sp->AsMirrorPtr(), proxy_method) << PrettyMethod(proxy_method);
+ DCHECK_EQ((*sp), proxy_method) << PrettyMethod(proxy_method);
DCHECK_EQ(proxy_method->GetFrameSizeInBytes(),
Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes())
<< PrettyMethod(proxy_method);
@@ -745,12 +744,12 @@ extern "C" uint64_t artQuickProxyInvokeHandler(mirror::ArtMethod* proxy_method,
jobject rcvr_jobj = soa.AddLocalReference<jobject>(receiver);
// Placing arguments into args vector and remove the receiver.
- mirror::ArtMethod* non_proxy_method = proxy_method->GetInterfaceMethodIfProxy();
+ ArtMethod* non_proxy_method = proxy_method->GetInterfaceMethodIfProxy(sizeof(void*));
CHECK(!non_proxy_method->IsStatic()) << PrettyMethod(proxy_method) << " "
<< PrettyMethod(non_proxy_method);
std::vector<jvalue> args;
uint32_t shorty_len = 0;
- const char* shorty = proxy_method->GetShorty(&shorty_len);
+ const char* shorty = non_proxy_method->GetShorty(&shorty_len);
BuildQuickArgumentVisitor local_ref_visitor(sp, false, shorty, shorty_len, &soa, &args);
local_ref_visitor.VisitArguments();
@@ -758,7 +757,7 @@ extern "C" uint64_t artQuickProxyInvokeHandler(mirror::ArtMethod* proxy_method,
args.erase(args.begin());
// Convert proxy method into expected interface method.
- mirror::ArtMethod* interface_method = proxy_method->FindOverriddenMethod();
+ ArtMethod* interface_method = proxy_method->FindOverriddenMethod(sizeof(void*));
DCHECK(interface_method != nullptr) << PrettyMethod(proxy_method);
DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method);
self->EndAssertNoThreadSuspension(old_cause);
@@ -777,9 +776,8 @@ extern "C" uint64_t artQuickProxyInvokeHandler(mirror::ArtMethod* proxy_method,
// so they don't get garbage collected.
class RememberForGcArgumentVisitor FINAL : public QuickArgumentVisitor {
public:
- RememberForGcArgumentVisitor(StackReference<mirror::ArtMethod>* sp, bool is_static,
- const char* shorty, uint32_t shorty_len,
- ScopedObjectAccessUnchecked* soa) :
+ RememberForGcArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty,
+ uint32_t shorty_len, ScopedObjectAccessUnchecked* soa) :
QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa) {}
void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
@@ -813,10 +811,8 @@ void RememberForGcArgumentVisitor::FixupReferences() {
}
// Lazily resolve a method for quick. Called by stub code.
-extern "C" const void* artQuickResolutionTrampoline(mirror::ArtMethod* called,
- mirror::Object* receiver,
- Thread* self,
- StackReference<mirror::ArtMethod>* sp)
+extern "C" const void* artQuickResolutionTrampoline(
+ ArtMethod* called, mirror::Object* receiver, Thread* self, ArtMethod** sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
// Start new JNI local reference state
@@ -827,7 +823,7 @@ extern "C" const void* artQuickResolutionTrampoline(mirror::ArtMethod* called,
// Compute details about the called method (avoid GCs)
ClassLinker* linker = Runtime::Current()->GetClassLinker();
- mirror::ArtMethod* caller = QuickArgumentVisitor::GetCallingMethod(sp);
+ ArtMethod* caller = QuickArgumentVisitor::GetCallingMethod(sp);
InvokeType invoke_type;
MethodReference called_method(nullptr, 0);
const bool called_method_known_on_entry = !called->IsRuntimeMethod();
@@ -906,7 +902,7 @@ extern "C" const void* artQuickResolutionTrampoline(mirror::ArtMethod* called,
HandleWrapper<mirror::Object> h_receiver(
hs.NewHandleWrapper(virtual_or_interface ? &receiver : &dummy));
DCHECK_EQ(caller->GetDexFile(), called_method.dex_file);
- called = linker->ResolveMethod(self, called_method.dex_method_index, &caller, invoke_type);
+ called = linker->ResolveMethod(self, called_method.dex_method_index, caller, invoke_type);
}
const void* code = nullptr;
if (LIKELY(!self->IsExceptionPending())) {
@@ -917,11 +913,11 @@ extern "C" const void* artQuickResolutionTrampoline(mirror::ArtMethod* called,
// Refine called method based on receiver.
CHECK(receiver != nullptr) << invoke_type;
- mirror::ArtMethod* orig_called = called;
+ ArtMethod* orig_called = called;
if (invoke_type == kVirtual) {
- called = receiver->GetClass()->FindVirtualMethodForVirtual(called);
+ called = receiver->GetClass()->FindVirtualMethodForVirtual(called, sizeof(void*));
} else {
- called = receiver->GetClass()->FindVirtualMethodForInterface(called);
+ called = receiver->GetClass()->FindVirtualMethodForInterface(called, sizeof(void*));
}
CHECK(called != nullptr) << PrettyMethod(orig_called) << " "
@@ -947,8 +943,9 @@ extern "C" const void* artQuickResolutionTrampoline(mirror::ArtMethod* called,
caller_method_name_and_sig_index);
}
if ((update_dex_cache_method_index != DexFile::kDexNoIndex) &&
- (caller->GetDexCacheResolvedMethod(update_dex_cache_method_index) != called)) {
- caller->SetDexCacheResolvedMethod(update_dex_cache_method_index, called);
+ (caller->GetDexCacheResolvedMethod(
+ update_dex_cache_method_index, sizeof(void*)) != called)) {
+ caller->SetDexCacheResolvedMethod(update_dex_cache_method_index, called, sizeof(void*));
}
} else if (invoke_type == kStatic) {
const auto called_dex_method_idx = called->GetDexMethodIndex();
@@ -958,7 +955,7 @@ extern "C" const void* artQuickResolutionTrampoline(mirror::ArtMethod* called,
// b/19175856
if (called->GetDexFile() == called_method.dex_file &&
called_method.dex_method_index != called_dex_method_idx) {
- called->GetDexCache()->SetResolvedMethod(called_dex_method_idx, called);
+ called->GetDexCache()->SetResolvedMethod(called_dex_method_idx, called, sizeof(void*));
}
}
@@ -1007,7 +1004,8 @@ extern "C" const void* artQuickResolutionTrampoline(mirror::ArtMethod* called,
// Fixup any locally saved objects may have moved during a GC.
visitor.FixupReferences();
// Place called method in callee-save frame to be placed as first argument to quick method.
- sp->Assign(called);
+ *sp = called;
+
return code;
}
@@ -1487,10 +1485,11 @@ class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize {
// is at *m = sp. Will update to point to the bottom of the save frame.
//
// Note: assumes ComputeAll() has been run before.
- void LayoutCalleeSaveFrame(Thread* self, StackReference<mirror::ArtMethod>** m, void* sp,
- HandleScope** handle_scope)
+ void LayoutCalleeSaveFrame(Thread* self, ArtMethod*** m, void* sp, HandleScope** handle_scope)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod* method = (*m)->AsMirrorPtr();
+ ArtMethod* method = **m;
+
+ DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), sizeof(void*));
uint8_t* sp8 = reinterpret_cast<uint8_t*>(sp);
@@ -1502,22 +1501,20 @@ class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize {
// Under the callee saves put handle scope and new method stack reference.
size_t handle_scope_size = HandleScope::SizeOf(num_handle_scope_references_);
- size_t scope_and_method = handle_scope_size + sizeof(StackReference<mirror::ArtMethod>);
+ size_t scope_and_method = handle_scope_size + sizeof(ArtMethod*);
sp8 -= scope_and_method;
// Align by kStackAlignment.
- sp8 = reinterpret_cast<uint8_t*>(RoundDown(
- reinterpret_cast<uintptr_t>(sp8), kStackAlignment));
+ sp8 = reinterpret_cast<uint8_t*>(RoundDown(reinterpret_cast<uintptr_t>(sp8), kStackAlignment));
- uint8_t* sp8_table = sp8 + sizeof(StackReference<mirror::ArtMethod>);
+ uint8_t* sp8_table = sp8 + sizeof(ArtMethod*);
*handle_scope = HandleScope::Create(sp8_table, self->GetTopHandleScope(),
num_handle_scope_references_);
// Add a slot for the method pointer, and fill it. Fix the pointer-pointer given to us.
uint8_t* method_pointer = sp8;
- StackReference<mirror::ArtMethod>* new_method_ref =
- reinterpret_cast<StackReference<mirror::ArtMethod>*>(method_pointer);
- new_method_ref->Assign(method);
+ auto** new_method_ref = reinterpret_cast<ArtMethod**>(method_pointer);
+ *new_method_ref = method;
*m = new_method_ref;
}
@@ -1529,8 +1526,7 @@ class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize {
// Re-layout the callee-save frame (insert a handle-scope). Then add space for the cookie.
// Returns the new bottom. Note: this may be unaligned.
- uint8_t* LayoutJNISaveFrame(Thread* self, StackReference<mirror::ArtMethod>** m, void* sp,
- HandleScope** handle_scope)
+ uint8_t* LayoutJNISaveFrame(Thread* self, ArtMethod*** m, void* sp, HandleScope** handle_scope)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// First, fix up the layout of the callee-save frame.
// We have to squeeze in the HandleScope, and relocate the method pointer.
@@ -1546,9 +1542,9 @@ class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize {
}
// WARNING: After this, *sp won't be pointing to the method anymore!
- uint8_t* ComputeLayout(Thread* self, StackReference<mirror::ArtMethod>** m,
- const char* shorty, uint32_t shorty_len, HandleScope** handle_scope,
- uintptr_t** start_stack, uintptr_t** start_gpr, uint32_t** start_fpr)
+ uint8_t* ComputeLayout(Thread* self, ArtMethod*** m, const char* shorty, uint32_t shorty_len,
+ HandleScope** handle_scope, uintptr_t** start_stack, uintptr_t** start_gpr,
+ uint32_t** start_fpr)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Walk(shorty, shorty_len);
@@ -1637,7 +1633,7 @@ class FillNativeCall {
class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
public:
BuildGenericJniFrameVisitor(Thread* self, bool is_static, const char* shorty, uint32_t shorty_len,
- StackReference<mirror::ArtMethod>** sp)
+ ArtMethod*** sp)
: QuickArgumentVisitor(*sp, is_static, shorty, shorty_len),
jni_call_(nullptr, nullptr, nullptr, nullptr), sm_(&jni_call_) {
ComputeGenericJniFrameSize fsc;
@@ -1655,7 +1651,7 @@ class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
sm_.AdvancePointer(self->GetJniEnv());
if (is_static) {
- sm_.AdvanceHandleScope((*sp)->AsMirrorPtr()->GetDeclaringClass());
+ sm_.AdvanceHandleScope((**sp)->GetDeclaringClass());
}
}
@@ -1811,10 +1807,9 @@ void artQuickGenericJniEndJNINonRef(Thread* self, uint32_t cookie, jobject lock)
* 1) How many bytes of the alloca can be released, if the value is non-negative.
* 2) An error, if the value is negative.
*/
-extern "C" TwoWordReturn artQuickGenericJniTrampoline(Thread* self,
- StackReference<mirror::ArtMethod>* sp)
+extern "C" TwoWordReturn artQuickGenericJniTrampoline(Thread* self, ArtMethod** sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod* called = sp->AsMirrorPtr();
+ ArtMethod* called = *sp;
DCHECK(called->IsNative()) << PrettyMethod(called, true);
uint32_t shorty_len = 0;
const char* shorty = called->GetShorty(&shorty_len);
@@ -1887,15 +1882,15 @@ extern "C" TwoWordReturn artQuickGenericJniTrampoline(Thread* self,
*/
extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self, jvalue result, uint64_t result_f)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- StackReference<mirror::ArtMethod>* sp = self->GetManagedStack()->GetTopQuickFrame();
+ ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrame();
uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
- mirror::ArtMethod* called = sp->AsMirrorPtr();
+ ArtMethod* called = *sp;
uint32_t cookie = *(sp32 - 1);
jobject lock = nullptr;
if (called->IsSynchronized()) {
HandleScope* table = reinterpret_cast<HandleScope*>(reinterpret_cast<uint8_t*>(sp)
- + sizeof(StackReference<mirror::ArtMethod>));
+ + sizeof(*sp));
lock = table->GetHandle(0).ToJObject();
}
@@ -1947,17 +1942,14 @@ extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self, jvalue result,
template<InvokeType type, bool access_check>
static TwoWordReturn artInvokeCommon(uint32_t method_idx, mirror::Object* this_object,
- mirror::ArtMethod* caller_method,
- Thread* self, StackReference<mirror::ArtMethod>* sp);
+ ArtMethod* caller_method, Thread* self, ArtMethod** sp);
template<InvokeType type, bool access_check>
static TwoWordReturn artInvokeCommon(uint32_t method_idx, mirror::Object* this_object,
- mirror::ArtMethod* caller_method,
- Thread* self, StackReference<mirror::ArtMethod>* sp) {
+ ArtMethod* caller_method, Thread* self, ArtMethod** sp) {
ScopedQuickEntrypointChecks sqec(self);
- DCHECK_EQ(sp->AsMirrorPtr(), Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs));
- mirror::ArtMethod* method = FindMethodFast(method_idx, this_object, caller_method, access_check,
- type);
+ DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs));
+ ArtMethod* method = FindMethodFast(method_idx, this_object, caller_method, access_check, type);
if (UNLIKELY(method == nullptr)) {
const DexFile* dex_file = caller_method->GetDeclaringClass()->GetDexCache()->GetDexFile();
uint32_t shorty_len;
@@ -1994,9 +1986,9 @@ static TwoWordReturn artInvokeCommon(uint32_t method_idx, mirror::Object* this_o
template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) \
TwoWordReturn artInvokeCommon<type, access_check>(uint32_t method_idx, \
mirror::Object* this_object, \
- mirror::ArtMethod* caller_method, \
+ ArtMethod* caller_method, \
Thread* self, \
- StackReference<mirror::ArtMethod>* sp) \
+ ArtMethod** sp) \
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, false);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, true);
@@ -2013,8 +2005,7 @@ EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, true);
// See comments in runtime_support_asm.S
extern "C" TwoWordReturn artInvokeInterfaceTrampolineWithAccessCheck(
uint32_t method_idx, mirror::Object* this_object,
- mirror::ArtMethod* caller_method, Thread* self,
- StackReference<mirror::ArtMethod>* sp)
+ ArtMethod* caller_method, Thread* self, ArtMethod** sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return artInvokeCommon<kInterface, true>(method_idx, this_object,
caller_method, self, sp);
@@ -2022,8 +2013,7 @@ extern "C" TwoWordReturn artInvokeInterfaceTrampolineWithAccessCheck(
extern "C" TwoWordReturn artInvokeDirectTrampolineWithAccessCheck(
uint32_t method_idx, mirror::Object* this_object,
- mirror::ArtMethod* caller_method, Thread* self,
- StackReference<mirror::ArtMethod>* sp)
+ ArtMethod* caller_method, Thread* self, ArtMethod** sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return artInvokeCommon<kDirect, true>(method_idx, this_object, caller_method,
self, sp);
@@ -2031,8 +2021,7 @@ extern "C" TwoWordReturn artInvokeDirectTrampolineWithAccessCheck(
extern "C" TwoWordReturn artInvokeStaticTrampolineWithAccessCheck(
uint32_t method_idx, mirror::Object* this_object,
- mirror::ArtMethod* caller_method, Thread* self,
- StackReference<mirror::ArtMethod>* sp)
+ ArtMethod* caller_method, Thread* self, ArtMethod** sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return artInvokeCommon<kStatic, true>(method_idx, this_object, caller_method,
self, sp);
@@ -2040,8 +2029,7 @@ extern "C" TwoWordReturn artInvokeStaticTrampolineWithAccessCheck(
extern "C" TwoWordReturn artInvokeSuperTrampolineWithAccessCheck(
uint32_t method_idx, mirror::Object* this_object,
- mirror::ArtMethod* caller_method, Thread* self,
- StackReference<mirror::ArtMethod>* sp)
+ ArtMethod* caller_method, Thread* self, ArtMethod** sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return artInvokeCommon<kSuper, true>(method_idx, this_object, caller_method,
self, sp);
@@ -2049,31 +2037,31 @@ extern "C" TwoWordReturn artInvokeSuperTrampolineWithAccessCheck(
extern "C" TwoWordReturn artInvokeVirtualTrampolineWithAccessCheck(
uint32_t method_idx, mirror::Object* this_object,
- mirror::ArtMethod* caller_method, Thread* self,
- StackReference<mirror::ArtMethod>* sp)
+ ArtMethod* caller_method, Thread* self, ArtMethod** sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return artInvokeCommon<kVirtual, true>(method_idx, this_object, caller_method,
self, sp);
}
// Determine target of interface dispatch. This object is known non-null.
-extern "C" TwoWordReturn artInvokeInterfaceTrampoline(mirror::ArtMethod* interface_method,
+extern "C" TwoWordReturn artInvokeInterfaceTrampoline(ArtMethod* interface_method,
mirror::Object* this_object,
- mirror::ArtMethod* caller_method,
+ ArtMethod* caller_method,
Thread* self,
- StackReference<mirror::ArtMethod>* sp)
+ ArtMethod** sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
- mirror::ArtMethod* method;
+ ArtMethod* method;
if (LIKELY(interface_method->GetDexMethodIndex() != DexFile::kDexNoIndex)) {
- method = this_object->GetClass()->FindVirtualMethodForInterface(interface_method);
+ method = this_object->GetClass()->FindVirtualMethodForInterface(
+ interface_method, sizeof(void*));
if (UNLIKELY(method == nullptr)) {
- ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(interface_method, this_object,
- caller_method);
+ ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(
+ interface_method, this_object, caller_method);
return GetTwoWordFailureValue(); // Failure.
}
} else {
- DCHECK(interface_method == Runtime::Current()->GetResolutionMethod());
+ DCHECK_EQ(interface_method, Runtime::Current()->GetResolutionMethod());
// Find the caller PC.
constexpr size_t pc_offset = GetCalleeSaveReturnPcOffset(kRuntimeISA, Runtime::kRefsAndArgs);
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
index 01c17ac..5cdf967 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
@@ -16,9 +16,9 @@
#include <stdint.h>
+#include "art_method-inl.h"
#include "callee_save_frame.h"
#include "common_runtime_test.h"
-#include "mirror/art_method-inl.h"
#include "quick/quick_method_frame_info.h"
namespace art {
@@ -31,8 +31,7 @@ class QuickTrampolineEntrypointsTest : public CommonRuntimeTest {
options->push_back(std::make_pair("imageinstructionset", "x86_64"));
}
- static mirror::ArtMethod* CreateCalleeSaveMethod(InstructionSet isa,
- Runtime::CalleeSaveType type)
+ static ArtMethod* CreateCalleeSaveMethod(InstructionSet isa, Runtime::CalleeSaveType type)
NO_THREAD_SAFETY_ANALYSIS {
Runtime* r = Runtime::Current();
@@ -40,7 +39,7 @@ class QuickTrampolineEntrypointsTest : public CommonRuntimeTest {
t->TransitionFromSuspendedToRunnable(); // So we can create callee-save methods.
r->SetInstructionSet(isa);
- mirror::ArtMethod* save_method = r->CreateCalleeSaveMethod();
+ ArtMethod* save_method = r->CreateCalleeSaveMethod();
r->SetCalleeSaveMethod(save_method, type);
t->TransitionFromRunnableToSuspended(ThreadState::kNative); // So we can shut down.
@@ -50,7 +49,7 @@ class QuickTrampolineEntrypointsTest : public CommonRuntimeTest {
static void CheckFrameSize(InstructionSet isa, Runtime::CalleeSaveType type, uint32_t save_size)
NO_THREAD_SAFETY_ANALYSIS {
- mirror::ArtMethod* save_method = CreateCalleeSaveMethod(isa, type);
+ ArtMethod* save_method = CreateCalleeSaveMethod(isa, type);
QuickMethodFrameInfo frame_info = save_method->GetQuickFrameInfo();
EXPECT_EQ(frame_info.FrameSizeInBytes(), save_size) << "Expected and real size differs for "
<< type << " core spills=" << std::hex << frame_info.CoreSpillMask() << " fp spills="
@@ -59,7 +58,7 @@ class QuickTrampolineEntrypointsTest : public CommonRuntimeTest {
static void CheckPCOffset(InstructionSet isa, Runtime::CalleeSaveType type, size_t pc_offset)
NO_THREAD_SAFETY_ANALYSIS {
- mirror::ArtMethod* save_method = CreateCalleeSaveMethod(isa, type);
+ ArtMethod* save_method = CreateCalleeSaveMethod(isa, type);
QuickMethodFrameInfo frame_info = save_method->GetQuickFrameInfo();
EXPECT_EQ(save_method->GetReturnPcOffset().SizeValue(), pc_offset)
<< "Expected and real pc offset differs for " << type
diff --git a/runtime/entrypoints/runtime_asm_entrypoints.h b/runtime/entrypoints/runtime_asm_entrypoints.h
index bfe7ee8..8209dc8 100644
--- a/runtime/entrypoints/runtime_asm_entrypoints.h
+++ b/runtime/entrypoints/runtime_asm_entrypoints.h
@@ -29,19 +29,19 @@ static inline const void* GetJniDlsymLookupStub() {
}
// Return the address of quick stub code for handling IMT conflicts.
-extern "C" void art_quick_imt_conflict_trampoline(mirror::ArtMethod*);
+extern "C" void art_quick_imt_conflict_trampoline(ArtMethod*);
static inline const void* GetQuickImtConflictStub() {
return reinterpret_cast<const void*>(art_quick_imt_conflict_trampoline);
}
// Return the address of quick stub code for bridging from quick code to the interpreter.
-extern "C" void art_quick_to_interpreter_bridge(mirror::ArtMethod*);
+extern "C" void art_quick_to_interpreter_bridge(ArtMethod*);
static inline const void* GetQuickToInterpreterBridge() {
return reinterpret_cast<const void*>(art_quick_to_interpreter_bridge);
}
// Return the address of quick stub code for handling JNI calls.
-extern "C" void art_quick_generic_jni_trampoline(mirror::ArtMethod*);
+extern "C" void art_quick_generic_jni_trampoline(ArtMethod*);
static inline const void* GetQuickGenericJniStub() {
return reinterpret_cast<const void*>(art_quick_generic_jni_trampoline);
}
@@ -53,7 +53,7 @@ static inline const void* GetQuickProxyInvokeHandler() {
}
// Return the address of quick stub code for resolving a method at first call.
-extern "C" void art_quick_resolution_trampoline(mirror::ArtMethod*);
+extern "C" void art_quick_resolution_trampoline(ArtMethod*);
static inline const void* GetQuickResolutionStub() {
return reinterpret_cast<const void*>(art_quick_resolution_trampoline);
}
diff --git a/runtime/exception_test.cc b/runtime/exception_test.cc
index 6808000..bc3ba21 100644
--- a/runtime/exception_test.cc
+++ b/runtime/exception_test.cc
@@ -96,11 +96,11 @@ class ExceptionTest : public CommonRuntimeTest {
CHECK_EQ(mapping_table_offset & 1u, 0u);
const uint8_t* code_ptr = &fake_header_code_and_maps_[gc_map_offset];
- method_f_ = my_klass_->FindVirtualMethod("f", "()I");
+ method_f_ = my_klass_->FindVirtualMethod("f", "()I", sizeof(void*));
ASSERT_TRUE(method_f_ != nullptr);
method_f_->SetEntryPointFromQuickCompiledCode(code_ptr);
- method_g_ = my_klass_->FindVirtualMethod("g", "(I)V");
+ method_g_ = my_klass_->FindVirtualMethod("g", "(I)V", sizeof(void*));
ASSERT_TRUE(method_g_ != nullptr);
method_g_->SetEntryPointFromQuickCompiledCode(code_ptr);
}
@@ -113,8 +113,8 @@ class ExceptionTest : public CommonRuntimeTest {
std::vector<uint8_t> fake_gc_map_;
std::vector<uint8_t> fake_header_code_and_maps_;
- mirror::ArtMethod* method_f_;
- mirror::ArtMethod* method_g_;
+ ArtMethod* method_f_;
+ ArtMethod* method_g_;
private:
mirror::Class* my_klass_;
@@ -167,7 +167,7 @@ TEST_F(ExceptionTest, StackTraceElement) {
std::vector<uintptr_t> fake_stack;
Runtime* r = Runtime::Current();
r->SetInstructionSet(kRuntimeISA);
- mirror::ArtMethod* save_method = r->CreateCalleeSaveMethod();
+ ArtMethod* save_method = r->CreateCalleeSaveMethod();
r->SetCalleeSaveMethod(save_method, Runtime::kSaveAll);
QuickMethodFrameInfo frame_info = save_method->GetQuickFrameInfo();
@@ -209,14 +209,13 @@ TEST_F(ExceptionTest, StackTraceElement) {
fake_stack.push_back(0);
// Set up thread to appear as if we called out of method_g_ at pc dex 3
- thread->SetTopOfStack(reinterpret_cast<StackReference<mirror::ArtMethod>*>(&fake_stack[0]));
+ thread->SetTopOfStack(reinterpret_cast<ArtMethod**>(&fake_stack[0]));
jobject internal = thread->CreateInternalStackTrace<false>(soa);
ASSERT_TRUE(internal != nullptr);
jobjectArray ste_array = Thread::InternalStackTraceToStackTraceElementArray(soa, internal);
ASSERT_TRUE(ste_array != nullptr);
- mirror::ObjectArray<mirror::StackTraceElement>* trace_array =
- soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>*>(ste_array);
+ auto* trace_array = soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>*>(ste_array);
ASSERT_TRUE(trace_array != nullptr);
ASSERT_TRUE(trace_array->Get(0) != nullptr);
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index 83f3ae1..4a352dd 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -19,8 +19,9 @@
#include <setjmp.h>
#include <sys/mman.h>
#include <sys/ucontext.h>
+
+#include "art_method-inl.h"
#include "base/stl_util.h"
-#include "mirror/art_method.h"
#include "mirror/class.h"
#include "sigchain.h"
#include "thread-inl.h"
@@ -321,7 +322,7 @@ bool FaultManager::IsInGeneratedCode(siginfo_t* siginfo, void* context, bool che
return false;
}
- mirror::ArtMethod* method_obj = 0;
+ ArtMethod* method_obj = 0;
uintptr_t return_pc = 0;
uintptr_t sp = 0;
@@ -331,6 +332,7 @@ bool FaultManager::IsInGeneratedCode(siginfo_t* siginfo, void* context, bool che
// If we don't have a potential method, we're outta here.
VLOG(signals) << "potential method: " << method_obj;
+ // TODO: Check linear alloc and image.
if (method_obj == 0 || !IsAligned<kObjectAlignment>(method_obj)) {
VLOG(signals) << "no method";
return false;
@@ -341,7 +343,7 @@ bool FaultManager::IsInGeneratedCode(siginfo_t* siginfo, void* context, bool che
// Check that the class pointer inside the object is not null and is aligned.
// TODO: Method might be not a heap address, and GetClass could fault.
// No read barrier because method_obj may not be a real object.
- mirror::Class* cls = method_obj->GetClass<kVerifyNone, kWithoutReadBarrier>();
+ mirror::Class* cls = method_obj->GetDeclaringClassNoBarrier();
if (cls == nullptr) {
VLOG(signals) << "not a class";
return false;
@@ -357,12 +359,6 @@ bool FaultManager::IsInGeneratedCode(siginfo_t* siginfo, void* context, bool che
return false;
}
- // Now make sure the class is a mirror::ArtMethod.
- if (!cls->IsArtMethodClass()) {
- VLOG(signals) << "not a method";
- return false;
- }
-
// We can be certain that this is a method now. Check if we have a GC map
// at the return PC address.
if (true || kIsDebugBuild) {
@@ -418,16 +414,14 @@ bool JavaStackTraceHandler::Action(int sig, siginfo_t* siginfo, void* context) {
#endif
if (in_generated_code) {
LOG(ERROR) << "Dumping java stack trace for crash in generated code";
- mirror::ArtMethod* method = nullptr;
+ ArtMethod* method = nullptr;
uintptr_t return_pc = 0;
uintptr_t sp = 0;
Thread* self = Thread::Current();
manager_->GetMethodAndReturnPcAndSp(siginfo, context, &method, &return_pc, &sp);
// Inside of generated code, sp[0] is the method, so sp is the frame.
- StackReference<mirror::ArtMethod>* frame =
- reinterpret_cast<StackReference<mirror::ArtMethod>*>(sp);
- self->SetTopOfStack(frame);
+ self->SetTopOfStack(reinterpret_cast<ArtMethod**>(sp));
#ifdef TEST_NESTED_SIGNAL
// To test the nested signal handler we raise a signal here. This will cause the
// nested signal handler to be called and perform a longjmp back to the setjmp
diff --git a/runtime/fault_handler.h b/runtime/fault_handler.h
index adac4c2..3b03a14 100644
--- a/runtime/fault_handler.h
+++ b/runtime/fault_handler.h
@@ -27,10 +27,7 @@
namespace art {
-namespace mirror {
class ArtMethod;
-} // namespace mirror
-
class FaultHandler;
class FaultManager {
@@ -58,7 +55,7 @@ class FaultManager {
// The IsInGeneratedCode() function checks that the mutator lock is held before it
// calls GetMethodAndReturnPCAndSP().
// TODO: think about adding lock assertions and fake lock and unlock functions.
- void GetMethodAndReturnPcAndSp(siginfo_t* siginfo, void* context, mirror::ArtMethod** out_method,
+ void GetMethodAndReturnPcAndSp(siginfo_t* siginfo, void* context, ArtMethod** out_method,
uintptr_t* out_return_pc, uintptr_t* out_sp)
NO_THREAD_SAFETY_ANALYSIS;
bool IsInGeneratedCode(siginfo_t* siginfo, void *context, bool check_dex_pc)
diff --git a/runtime/gc/accounting/mod_union_table_test.cc b/runtime/gc/accounting/mod_union_table_test.cc
index 043b558..363b76a 100644
--- a/runtime/gc/accounting/mod_union_table_test.cc
+++ b/runtime/gc/accounting/mod_union_table_test.cc
@@ -198,12 +198,12 @@ void ModUnionTableTest::RunTest(ModUnionTableFactory::TableType type) {
obj1->Set(1, other_space_ref1);
obj2->Set(3, other_space_ref2);
table->ClearCards();
- std::set<mirror::Object*> visited;
- table->UpdateAndMarkReferences(&CollectVisitedCallback, &visited);
+ std::set<mirror::Object*> visited_before;
+ table->UpdateAndMarkReferences(&CollectVisitedCallback, &visited_before);
// Check that we visited all the references in other spaces only.
- ASSERT_GE(visited.size(), 2u);
- ASSERT_TRUE(visited.find(other_space_ref1) != visited.end());
- ASSERT_TRUE(visited.find(other_space_ref2) != visited.end());
+ ASSERT_GE(visited_before.size(), 2u);
+ ASSERT_TRUE(visited_before.find(other_space_ref1) != visited_before.end());
+ ASSERT_TRUE(visited_before.find(other_space_ref2) != visited_before.end());
// Verify that all the other references were visited.
// obj1, obj2 cards should still be in mod union table since they have references to other
// spaces.
@@ -229,12 +229,15 @@ void ModUnionTableTest::RunTest(ModUnionTableFactory::TableType type) {
ASSERT_TRUE(table->ContainsCardFor(reinterpret_cast<uintptr_t>(ptr)));
}
// Visit again and make sure the cards got cleared back to their sane state.
- visited.clear();
- table->UpdateAndMarkReferences(&CollectVisitedCallback, &visited);
- // Verify that the dump matches what we saw earlier.
+ std::set<mirror::Object*> visited_after;
+ table->UpdateAndMarkReferences(&CollectVisitedCallback, &visited_after);
+ // Check that we visited a superset after.
+ for (auto* obj : visited_before) {
+ ASSERT_TRUE(visited_after.find(obj) != visited_after.end()) << obj;
+ }
+ // Verify that the dump still works.
std::ostringstream oss2;
table->Dump(oss2);
- ASSERT_EQ(oss.str(), oss2.str());
// Remove the space we added so it doesn't persist to the next test.
heap->RemoveSpace(other_space.get());
}
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index 84dadea..fe2b284 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -21,7 +21,7 @@
#include "dex_file-inl.h"
#include "mem_map.h"
#include "mirror/object-inl.h"
-#include "mirror/class.h"
+#include "mirror/class-inl.h"
#include "mirror/object_array.h"
namespace art {
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 26f349a..658390d 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -22,6 +22,7 @@
#include "gc/space/image_space.h"
#include "gc/space/space.h"
#include "intern_table.h"
+#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 2a9c03d..1c9c412 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -401,7 +401,8 @@ class MarkSweepMarkObjectSlowPath {
<< (field != nullptr ? field->GetTypeDescriptor() : "")
<< " first_ref_field_offset="
<< (holder_->IsClass()
- ? holder_->AsClass()->GetFirstReferenceStaticFieldOffset()
+ ? holder_->AsClass()->GetFirstReferenceStaticFieldOffset(
+ sizeof(void*))
: holder_->GetClass()->GetFirstReferenceInstanceFieldOffset())
<< " num_of_ref_fields="
<< (holder_->IsClass()
@@ -589,7 +590,8 @@ void MarkSweep::MarkNonThreadRoots() {
void MarkSweep::MarkConcurrentRoots(VisitRootFlags flags) {
TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
// Visit all runtime roots and clear dirty flags.
- Runtime::Current()->VisitConcurrentRoots(this, flags);
+ Runtime::Current()->VisitConcurrentRoots(
+ this, static_cast<VisitRootFlags>(flags | kVisitRootFlagNonMoving));
}
class ScanObjectVisitor {
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index eb0e9be..2d54330 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -30,7 +30,6 @@
#include "gc/space/rosalloc_space-inl.h"
#include "runtime.h"
#include "handle_scope-inl.h"
-#include "thread.h"
#include "thread-inl.h"
#include "utils.h"
#include "verify_object-inl.h"
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index fbde494..59d0259 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1630,7 +1630,12 @@ size_t Heap::GetObjectsAllocated() const {
}
uint64_t Heap::GetObjectsAllocatedEver() const {
- return GetObjectsFreedEver() + GetObjectsAllocated();
+ uint64_t total = GetObjectsFreedEver();
+ // If we are detached, we can't use GetObjectsAllocated since we can't change thread states.
+ if (Thread::Current() != nullptr) {
+ total += GetObjectsAllocated();
+ }
+ return total;
}
uint64_t Heap::GetBytesAllocatedEver() const {
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index 5af2a53..4d51d38 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -17,8 +17,8 @@
#include "reference_processor.h"
#include "base/time_utils.h"
+#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
-#include "mirror/reference.h"
#include "mirror/reference-inl.h"
#include "reference_processor-inl.h"
#include "reflection.h"
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index ade9cec..437fd8c 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -23,13 +23,13 @@
#include <random>
+#include "art_method.h"
#include "base/macros.h"
#include "base/stl_util.h"
#include "base/scoped_flock.h"
#include "base/time_utils.h"
#include "base/unix_file/fd_file.h"
#include "gc/accounting/space_bitmap-inl.h"
-#include "mirror/art_method.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "oat_file.h"
@@ -687,7 +687,20 @@ ImageSpace* ImageSpace::Init(const char* image_filename, const char* image_locat
image_file_size, image_header.GetImageSize());
return nullptr;
}
- auto end_of_bitmap = image_header.GetImageBitmapOffset() + image_header.GetImageBitmapSize();
+
+ if (kIsDebugBuild) {
+ LOG(INFO) << "Dumping image sections";
+ for (size_t i = 0; i < ImageHeader::kSectionCount; ++i) {
+ const auto section_idx = static_cast<ImageHeader::ImageSections>(i);
+ auto& section = image_header.GetImageSection(section_idx);
+ LOG(INFO) << section_idx << " start="
+ << reinterpret_cast<void*>(image_header.GetImageBegin() + section.Offset())
+ << section;
+ }
+ }
+
+ const auto& bitmap_section = image_header.GetImageSection(ImageHeader::kSectionImageBitmap);
+ auto end_of_bitmap = static_cast<size_t>(bitmap_section.End());
if (end_of_bitmap != image_file_size) {
*error_msg = StringPrintf(
"Image file size does not equal end of bitmap: size=%" PRIu64 " vs. %zu.", image_file_size,
@@ -697,7 +710,7 @@ ImageSpace* ImageSpace::Init(const char* image_filename, const char* image_locat
// Note: The image header is part of the image due to mmap page alignment required of offset.
std::unique_ptr<MemMap> map(MemMap::MapFileAtAddress(
- image_header.GetImageBegin(), image_header.GetImageSize() + image_header.GetArtFieldsSize(),
+ image_header.GetImageBegin(), image_header.GetImageSize(),
PROT_READ | PROT_WRITE, MAP_PRIVATE, file->Fd(), 0, false, image_filename, error_msg));
if (map.get() == nullptr) {
DCHECK(!error_msg->empty());
@@ -706,13 +719,9 @@ ImageSpace* ImageSpace::Init(const char* image_filename, const char* image_locat
CHECK_EQ(image_header.GetImageBegin(), map->Begin());
DCHECK_EQ(0, memcmp(&image_header, map->Begin(), sizeof(ImageHeader)));
- std::unique_ptr<MemMap> image_map(
- MemMap::MapFileAtAddress(nullptr, image_header.GetImageBitmapSize(),
- PROT_READ, MAP_PRIVATE,
- file->Fd(), image_header.GetImageBitmapOffset(),
- false,
- image_filename,
- error_msg));
+ std::unique_ptr<MemMap> image_map(MemMap::MapFileAtAddress(
+ nullptr, bitmap_section.Size(), PROT_READ, MAP_PRIVATE, file->Fd(),
+ bitmap_section.Offset(), false, image_filename, error_msg));
if (image_map.get() == nullptr) {
*error_msg = StringPrintf("Failed to map image bitmap: %s", error_msg->c_str());
return nullptr;
@@ -729,7 +738,9 @@ ImageSpace* ImageSpace::Init(const char* image_filename, const char* image_locat
return nullptr;
}
- uint8_t* const image_end = map->Begin() + image_header.GetImageSize();
+ // We only want the mirror object, not the ArtFields and ArtMethods.
+ uint8_t* const image_end =
+ map->Begin() + image_header.GetImageSection(ImageHeader::kSectionObjects).End();
std::unique_ptr<ImageSpace> space(new ImageSpace(image_filename, image_location,
map.release(), bitmap.release(), image_end));
@@ -753,25 +764,16 @@ ImageSpace* ImageSpace::Init(const char* image_filename, const char* image_locat
Runtime* runtime = Runtime::Current();
runtime->SetInstructionSet(space->oat_file_->GetOatHeader().GetInstructionSet());
- mirror::Object* resolution_method = image_header.GetImageRoot(ImageHeader::kResolutionMethod);
- runtime->SetResolutionMethod(down_cast<mirror::ArtMethod*>(resolution_method));
- mirror::Object* imt_conflict_method = image_header.GetImageRoot(ImageHeader::kImtConflictMethod);
- runtime->SetImtConflictMethod(down_cast<mirror::ArtMethod*>(imt_conflict_method));
- mirror::Object* imt_unimplemented_method =
- image_header.GetImageRoot(ImageHeader::kImtUnimplementedMethod);
- runtime->SetImtUnimplementedMethod(down_cast<mirror::ArtMethod*>(imt_unimplemented_method));
- mirror::Object* default_imt = image_header.GetImageRoot(ImageHeader::kDefaultImt);
- runtime->SetDefaultImt(down_cast<mirror::ObjectArray<mirror::ArtMethod>*>(default_imt));
-
- mirror::Object* callee_save_method = image_header.GetImageRoot(ImageHeader::kCalleeSaveMethod);
- runtime->SetCalleeSaveMethod(down_cast<mirror::ArtMethod*>(callee_save_method),
- Runtime::kSaveAll);
- callee_save_method = image_header.GetImageRoot(ImageHeader::kRefsOnlySaveMethod);
- runtime->SetCalleeSaveMethod(down_cast<mirror::ArtMethod*>(callee_save_method),
- Runtime::kRefsOnly);
- callee_save_method = image_header.GetImageRoot(ImageHeader::kRefsAndArgsSaveMethod);
- runtime->SetCalleeSaveMethod(down_cast<mirror::ArtMethod*>(callee_save_method),
- Runtime::kRefsAndArgs);
+ runtime->SetResolutionMethod(image_header.GetImageMethod(ImageHeader::kResolutionMethod));
+ runtime->SetImtConflictMethod(image_header.GetImageMethod(ImageHeader::kImtConflictMethod));
+ runtime->SetImtUnimplementedMethod(
+ image_header.GetImageMethod(ImageHeader::kImtUnimplementedMethod));
+ runtime->SetCalleeSaveMethod(
+ image_header.GetImageMethod(ImageHeader::kCalleeSaveMethod), Runtime::kSaveAll);
+ runtime->SetCalleeSaveMethod(
+ image_header.GetImageMethod(ImageHeader::kRefsOnlySaveMethod), Runtime::kRefsOnly);
+ runtime->SetCalleeSaveMethod(
+ image_header.GetImageMethod(ImageHeader::kRefsAndArgsSaveMethod), Runtime::kRefsAndArgs);
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
LOG(INFO) << "ImageSpace::Init exiting (" << PrettyDuration(NanoTime() - start_time)
diff --git a/runtime/gc/space/space_test.h b/runtime/gc/space/space_test.h
index 3e9e9f7..6e0e0d2 100644
--- a/runtime/gc/space/space_test.h
+++ b/runtime/gc/space/space_test.h
@@ -23,6 +23,8 @@
#include "common_runtime_test.h"
#include "globals.h"
#include "mirror/array-inl.h"
+#include "mirror/class-inl.h"
+#include "mirror/class_loader.h"
#include "mirror/object-inl.h"
#include "scoped_thread_state_change.h"
#include "zygote_space.h"
diff --git a/runtime/globals.h b/runtime/globals.h
index 4d7fd2e..fe699c6 100644
--- a/runtime/globals.h
+++ b/runtime/globals.h
@@ -67,12 +67,8 @@ static constexpr bool kUseOptimizingCompiler = false;
// Garbage collector constants.
static constexpr bool kMovingCollector = true;
static constexpr bool kMarkCompactSupport = false && kMovingCollector;
-// True if we allow moving field arrays, this can cause complication with mark compact.
-static constexpr bool kMoveFieldArrays = !kMarkCompactSupport;
// True if we allow moving classes.
static constexpr bool kMovingClasses = !kMarkCompactSupport;
-// True if we allow moving methods.
-static constexpr bool kMovingMethods = false;
// If true, the quick compiler embeds class pointers in the compiled
// code, if possible.
diff --git a/runtime/handle_scope.h b/runtime/handle_scope.h
index ac28c8a..9a0e52e 100644
--- a/runtime/handle_scope.h
+++ b/runtime/handle_scope.h
@@ -159,6 +159,10 @@ class PACKED(4) StackHandleScope FINAL : public HandleScope {
ALWAYS_INLINE void SetReference(size_t i, mirror::Object* object)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ Thread* Self() const {
+ return self_;
+ }
+
private:
template<class T>
ALWAYS_INLINE MutableHandle<T> GetHandle(size_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
diff --git a/runtime/image.cc b/runtime/image.cc
index d9bd2a8..947c914 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -24,27 +24,21 @@
namespace art {
const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '1', '5', '\0' };
+const uint8_t ImageHeader::kImageVersion[] = { '0', '1', '6', '\0' };
ImageHeader::ImageHeader(uint32_t image_begin,
uint32_t image_size,
- uint32_t art_fields_offset,
- uint32_t art_fields_size,
- uint32_t image_bitmap_offset,
- uint32_t image_bitmap_size,
+ ImageSection* sections,
uint32_t image_roots,
uint32_t oat_checksum,
uint32_t oat_file_begin,
uint32_t oat_data_begin,
uint32_t oat_data_end,
uint32_t oat_file_end,
+ uint32_t pointer_size,
bool compile_pic)
: image_begin_(image_begin),
image_size_(image_size),
- art_fields_offset_(art_fields_offset),
- art_fields_size_(art_fields_size),
- image_bitmap_offset_(image_bitmap_offset),
- image_bitmap_size_(image_bitmap_size),
oat_checksum_(oat_checksum),
oat_file_begin_(oat_file_begin),
oat_data_begin_(oat_data_begin),
@@ -52,6 +46,7 @@ ImageHeader::ImageHeader(uint32_t image_begin,
oat_file_end_(oat_file_end),
patch_delta_(0),
image_roots_(image_roots),
+ pointer_size_(pointer_size),
compile_pic_(compile_pic) {
CHECK_EQ(image_begin, RoundUp(image_begin, kPageSize));
CHECK_EQ(oat_file_begin, RoundUp(oat_file_begin, kPageSize));
@@ -61,8 +56,10 @@ ImageHeader::ImageHeader(uint32_t image_begin,
CHECK_LE(oat_file_begin, oat_data_begin);
CHECK_LT(oat_data_begin, oat_data_end);
CHECK_LE(oat_data_end, oat_file_end);
+ CHECK(ValidPointerSize(pointer_size_)) << pointer_size_;
memcpy(magic_, kImageMagic, sizeof(kImageMagic));
memcpy(version_, kImageVersion, sizeof(kImageVersion));
+ std::copy_n(sections, kSectionCount, sections_);
}
void ImageHeader::RelocateImage(off_t delta) {
@@ -74,6 +71,9 @@ void ImageHeader::RelocateImage(off_t delta) {
oat_file_end_ += delta;
image_roots_ += delta;
patch_delta_ += delta;
+ for (size_t i = 0; i < kImageMethodsCount; ++i) {
+ image_methods_[i] += delta;
+ }
}
bool ImageHeader::IsValid() const {
@@ -128,4 +128,23 @@ mirror::ObjectArray<mirror::Object>* ImageHeader::GetImageRoots() const {
return result;
}
+ArtMethod* ImageHeader::GetImageMethod(ImageMethod index) const {
+ CHECK_LT(static_cast<size_t>(index), kImageMethodsCount);
+ return reinterpret_cast<ArtMethod*>(image_methods_[index]);
+}
+
+void ImageHeader::SetImageMethod(ImageMethod index, ArtMethod* method) {
+ CHECK_LT(static_cast<size_t>(index), kImageMethodsCount);
+ image_methods_[index] = reinterpret_cast<uint64_t>(method);
+}
+
+const ImageSection& ImageHeader::GetImageSection(ImageSections index) const {
+ CHECK_LT(static_cast<size_t>(index), kSectionCount);
+ return sections_[index];
+}
+
+std::ostream& operator<<(std::ostream& os, const ImageSection& section) {
+ return os << "size=" << section.Size() << " range=" << section.Offset() << "-" << section.End();
+}
+
} // namespace art
diff --git a/runtime/image.h b/runtime/image.h
index 52995ed..c6be7ef 100644
--- a/runtime/image.h
+++ b/runtime/image.h
@@ -24,6 +24,34 @@
namespace art {
+class PACKED(4) ImageSection {
+ public:
+ ImageSection() : offset_(0), size_(0) { }
+ ImageSection(uint32_t offset, uint32_t size) : offset_(offset), size_(size) { }
+ ImageSection(const ImageSection& section) = default;
+ ImageSection& operator=(const ImageSection& section) = default;
+
+ uint32_t Offset() const {
+ return offset_;
+ }
+
+ uint32_t Size() const {
+ return size_;
+ }
+
+ uint32_t End() const {
+ return Offset() + Size();
+ }
+
+ bool Contains(uint64_t offset) const {
+ return offset - offset_ < size_;
+ }
+
+ private:
+ uint32_t offset_;
+ uint32_t size_;
+};
+
// header of image files written by ImageWriter, read and validated by Space.
class PACKED(4) ImageHeader {
public:
@@ -31,16 +59,14 @@ class PACKED(4) ImageHeader {
ImageHeader(uint32_t image_begin,
uint32_t image_size_,
- uint32_t art_fields_offset,
- uint32_t art_fields_size,
- uint32_t image_bitmap_offset,
- uint32_t image_bitmap_size,
+ ImageSection* sections,
uint32_t image_roots,
uint32_t oat_checksum,
uint32_t oat_file_begin,
uint32_t oat_data_begin,
uint32_t oat_data_end,
uint32_t oat_file_end,
+ uint32_t pointer_size,
bool compile_pic_);
bool IsValid() const;
@@ -54,22 +80,6 @@ class PACKED(4) ImageHeader {
return static_cast<uint32_t>(image_size_);
}
- size_t GetArtFieldsOffset() const {
- return art_fields_offset_;
- }
-
- size_t GetArtFieldsSize() const {
- return art_fields_size_;
- }
-
- size_t GetImageBitmapOffset() const {
- return image_bitmap_offset_;
- }
-
- size_t GetImageBitmapSize() const {
- return image_bitmap_size_;
- }
-
uint32_t GetOatChecksum() const {
return oat_checksum_;
}
@@ -94,6 +104,10 @@ class PACKED(4) ImageHeader {
return reinterpret_cast<uint8_t*>(oat_file_end_);
}
+ uint32_t GetPointerSize() const {
+ return pointer_size_;
+ }
+
off_t GetPatchDelta() const {
return patch_delta_;
}
@@ -108,19 +122,38 @@ class PACKED(4) ImageHeader {
return oat_filename;
}
- enum ImageRoot {
+ enum ImageMethod {
kResolutionMethod,
kImtConflictMethod,
kImtUnimplementedMethod,
- kDefaultImt,
kCalleeSaveMethod,
kRefsOnlySaveMethod,
kRefsAndArgsSaveMethod,
+ kImageMethodsCount, // Number of elements in enum.
+ };
+
+ enum ImageRoot {
kDexCaches,
kClassRoots,
kImageRootsMax,
};
+ enum ImageSections {
+ kSectionObjects,
+ kSectionArtFields,
+ kSectionArtMethods,
+ kSectionImageBitmap,
+ kSectionCount, // Number of elements in enum.
+ };
+
+ ArtMethod* GetImageMethod(ImageMethod index) const;
+ void SetImageMethod(ImageMethod index, ArtMethod* method);
+
+ const ImageSection& GetImageSection(ImageSections index) const;
+ const ImageSection& GetMethodsSection() const {
+ return GetImageSection(kSectionArtMethods);
+ }
+
mirror::Object* GetImageRoot(ImageRoot image_root) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
mirror::ObjectArray<mirror::Object>* GetImageRoots() const
@@ -145,18 +178,6 @@ class PACKED(4) ImageHeader {
// Image size, not page aligned.
uint32_t image_size_;
- // ArtField array offset.
- uint32_t art_fields_offset_;
-
- // ArtField size in bytes.
- uint32_t art_fields_size_;
-
- // Image bitmap offset in the file.
- uint32_t image_bitmap_offset_;
-
- // Size of the image bitmap.
- uint32_t image_bitmap_size_;
-
// Checksum of the oat file we link to for load time sanity check.
uint32_t oat_checksum_;
@@ -179,12 +200,26 @@ class PACKED(4) ImageHeader {
// Absolute address of an Object[] of objects needed to reinitialize from an image.
uint32_t image_roots_;
+ // Pointer size, this affects the size of the ArtMethods.
+ uint32_t pointer_size_;
+
// Boolean (0 or 1) to denote if the image was compiled with --compile-pic option
const uint32_t compile_pic_;
+ // Image sections
+ ImageSection sections_[kSectionCount];
+
+ // Image methods.
+ uint64_t image_methods_[kImageMethodsCount];
+
friend class ImageWriter;
};
+std::ostream& operator<<(std::ostream& os, const ImageHeader::ImageMethod& policy);
+std::ostream& operator<<(std::ostream& os, const ImageHeader::ImageRoot& policy);
+std::ostream& operator<<(std::ostream& os, const ImageHeader::ImageSections& section);
+std::ostream& operator<<(std::ostream& os, const ImageSection& section);
+
} // namespace art
#endif // ART_RUNTIME_IMAGE_H_
diff --git a/runtime/indirect_reference_table-inl.h b/runtime/indirect_reference_table-inl.h
index 39d850f..f70503d 100644
--- a/runtime/indirect_reference_table-inl.h
+++ b/runtime/indirect_reference_table-inl.h
@@ -19,6 +19,7 @@
#include "indirect_reference_table.h"
+#include "gc_root-inl.h"
#include "runtime-inl.h"
#include "verify_object-inl.h"
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 98e6200..4ced23d 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -19,6 +19,7 @@
#include <sstream>
#include "arch/context.h"
+#include "art_method-inl.h"
#include "atomic.h"
#include "class_linker.h"
#include "debugger.h"
@@ -30,7 +31,6 @@
#include "interpreter/interpreter.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
-#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache.h"
#include "mirror/object_array-inl.h"
@@ -78,15 +78,15 @@ void Instrumentation::InstallStubsForClass(mirror::Class* klass) {
// could not be initialized or linked with regards to class inheritance.
} else {
for (size_t i = 0, e = klass->NumDirectMethods(); i < e; i++) {
- InstallStubsForMethod(klass->GetDirectMethod(i));
+ InstallStubsForMethod(klass->GetDirectMethod(i, sizeof(void*)));
}
for (size_t i = 0, e = klass->NumVirtualMethods(); i < e; i++) {
- InstallStubsForMethod(klass->GetVirtualMethod(i));
+ InstallStubsForMethod(klass->GetVirtualMethod(i, sizeof(void*)));
}
}
}
-static void UpdateEntrypoints(mirror::ArtMethod* method, const void* quick_code)
+static void UpdateEntrypoints(ArtMethod* method, const void* quick_code)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Runtime* const runtime = Runtime::Current();
jit::Jit* jit = runtime->GetJit();
@@ -114,7 +114,7 @@ static void UpdateEntrypoints(mirror::ArtMethod* method, const void* quick_code)
}
}
-void Instrumentation::InstallStubsForMethod(mirror::ArtMethod* method) {
+void Instrumentation::InstallStubsForMethod(ArtMethod* method) {
if (method->IsAbstract() || method->IsProxyMethod()) {
// Do not change stubs for these methods.
return;
@@ -175,7 +175,7 @@ static void InstrumentationInstallStack(Thread* thread, void* arg)
}
bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod* m = GetMethod();
+ ArtMethod* m = GetMethod();
if (m == nullptr) {
if (kVerboseInstrumentation) {
LOG(INFO) << " Skipping upcall. Frame " << GetFrameId();
@@ -319,7 +319,7 @@ static void InstrumentationRestoreStack(Thread* thread, void* arg)
if (instrumentation_stack_->size() == 0) {
return false; // Stop.
}
- mirror::ArtMethod* m = GetMethod();
+ ArtMethod* m = GetMethod();
if (GetCurrentQuickFrame() == nullptr) {
if (kVerboseInstrumentation) {
LOG(INFO) << " Ignoring a shadow frame. Frame " << GetFrameId()
@@ -656,7 +656,7 @@ void Instrumentation::ResetQuickAllocEntryPoints() {
}
}
-void Instrumentation::UpdateMethodsCode(mirror::ArtMethod* method, const void* quick_code) {
+void Instrumentation::UpdateMethodsCode(ArtMethod* method, const void* quick_code) {
DCHECK(method->GetDeclaringClass()->IsResolved());
const void* new_quick_code;
if (LIKELY(!instrumentation_stubs_installed_)) {
@@ -679,67 +679,42 @@ void Instrumentation::UpdateMethodsCode(mirror::ArtMethod* method, const void* q
UpdateEntrypoints(method, new_quick_code);
}
-bool Instrumentation::AddDeoptimizedMethod(mirror::ArtMethod* method) {
- // Note that the insert() below isn't read barrier-aware. So, this
- // FindDeoptimizedMethod() call is necessary or else we would end up
- // storing the same method twice in the map (the from-space and the
- // to-space ones).
- if (FindDeoptimizedMethod(method)) {
+bool Instrumentation::AddDeoptimizedMethod(ArtMethod* method) {
+ if (IsDeoptimizedMethod(method)) {
// Already in the map. Return.
return false;
}
// Not found. Add it.
- static_assert(!kMovingMethods, "Not safe if methods can move");
- int32_t hash_code = method->IdentityHashCode();
- deoptimized_methods_.insert(std::make_pair(hash_code, GcRoot<mirror::ArtMethod>(method)));
+ deoptimized_methods_.insert(method);
return true;
}
-bool Instrumentation::FindDeoptimizedMethod(mirror::ArtMethod* method) {
- static_assert(!kMovingMethods, "Not safe if methods can move");
- int32_t hash_code = method->IdentityHashCode();
- auto range = deoptimized_methods_.equal_range(hash_code);
- for (auto it = range.first; it != range.second; ++it) {
- mirror::ArtMethod* m = it->second.Read();
- if (m == method) {
- // Found.
- return true;
- }
- }
- // Not found.
- return false;
+bool Instrumentation::IsDeoptimizedMethod(ArtMethod* method) {
+ return deoptimized_methods_.find(method) != deoptimized_methods_.end();
}
-mirror::ArtMethod* Instrumentation::BeginDeoptimizedMethod() {
- auto it = deoptimized_methods_.begin();
- if (it == deoptimized_methods_.end()) {
+ArtMethod* Instrumentation::BeginDeoptimizedMethod() {
+ if (deoptimized_methods_.empty()) {
// Empty.
return nullptr;
}
- return it->second.Read();
+ return *deoptimized_methods_.begin();
}
-bool Instrumentation::RemoveDeoptimizedMethod(mirror::ArtMethod* method) {
- static_assert(!kMovingMethods, "Not safe if methods can move");
- int32_t hash_code = method->IdentityHashCode();
- auto range = deoptimized_methods_.equal_range(hash_code);
- for (auto it = range.first; it != range.second; ++it) {
- mirror::ArtMethod* m = it->second.Read();
- if (m == method) {
- // Found. Erase and return.
- deoptimized_methods_.erase(it);
- return true;
- }
+bool Instrumentation::RemoveDeoptimizedMethod(ArtMethod* method) {
+ auto it = deoptimized_methods_.find(method);
+ if (it == deoptimized_methods_.end()) {
+ return false;
}
- // Not found.
- return false;
+ deoptimized_methods_.erase(it);
+ return true;
}
bool Instrumentation::IsDeoptimizedMethodsEmpty() const {
return deoptimized_methods_.empty();
}
-void Instrumentation::Deoptimize(mirror::ArtMethod* method) {
+void Instrumentation::Deoptimize(ArtMethod* method) {
CHECK(!method->IsNative());
CHECK(!method->IsProxyMethod());
CHECK(!method->IsAbstract());
@@ -762,7 +737,7 @@ void Instrumentation::Deoptimize(mirror::ArtMethod* method) {
}
}
-void Instrumentation::Undeoptimize(mirror::ArtMethod* method) {
+void Instrumentation::Undeoptimize(ArtMethod* method) {
CHECK(!method->IsNative());
CHECK(!method->IsProxyMethod());
CHECK(!method->IsAbstract());
@@ -798,10 +773,10 @@ void Instrumentation::Undeoptimize(mirror::ArtMethod* method) {
}
}
-bool Instrumentation::IsDeoptimized(mirror::ArtMethod* method) {
+bool Instrumentation::IsDeoptimized(ArtMethod* method) {
DCHECK(method != nullptr);
ReaderMutexLock mu(Thread::Current(), deoptimized_methods_lock_);
- return FindDeoptimizedMethod(method);
+ return IsDeoptimizedMethod(method);
}
void Instrumentation::EnableDeoptimization() {
@@ -819,7 +794,7 @@ void Instrumentation::DisableDeoptimization(const char* key) {
}
// Undeoptimized selected methods.
while (true) {
- mirror::ArtMethod* method;
+ ArtMethod* method;
{
ReaderMutexLock mu(Thread::Current(), deoptimized_methods_lock_);
if (IsDeoptimizedMethodsEmpty()) {
@@ -866,7 +841,7 @@ void Instrumentation::DisableMethodTracing(const char* key) {
ConfigureStubs(key, InstrumentationLevel::kInstrumentNothing);
}
-const void* Instrumentation::GetQuickCodeFor(mirror::ArtMethod* method, size_t pointer_size) const {
+const void* Instrumentation::GetQuickCodeFor(ArtMethod* method, size_t pointer_size) const {
Runtime* runtime = Runtime::Current();
if (LIKELY(!instrumentation_stubs_installed_)) {
const void* code = method->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size);
@@ -883,7 +858,7 @@ const void* Instrumentation::GetQuickCodeFor(mirror::ArtMethod* method, size_t p
}
void Instrumentation::MethodEnterEventImpl(Thread* thread, mirror::Object* this_object,
- mirror::ArtMethod* method,
+ ArtMethod* method,
uint32_t dex_pc) const {
auto it = method_entry_listeners_.begin();
bool is_end = (it == method_entry_listeners_.end());
@@ -897,7 +872,7 @@ void Instrumentation::MethodEnterEventImpl(Thread* thread, mirror::Object* this_
}
void Instrumentation::MethodExitEventImpl(Thread* thread, mirror::Object* this_object,
- mirror::ArtMethod* method,
+ ArtMethod* method,
uint32_t dex_pc, const JValue& return_value) const {
auto it = method_exit_listeners_.begin();
bool is_end = (it == method_exit_listeners_.end());
@@ -911,7 +886,7 @@ void Instrumentation::MethodExitEventImpl(Thread* thread, mirror::Object* this_o
}
void Instrumentation::MethodUnwindEvent(Thread* thread, mirror::Object* this_object,
- mirror::ArtMethod* method,
+ ArtMethod* method,
uint32_t dex_pc) const {
if (HasMethodUnwindListeners()) {
for (InstrumentationListener* listener : method_unwind_listeners_) {
@@ -921,7 +896,7 @@ void Instrumentation::MethodUnwindEvent(Thread* thread, mirror::Object* this_obj
}
void Instrumentation::DexPcMovedEventImpl(Thread* thread, mirror::Object* this_object,
- mirror::ArtMethod* method,
+ ArtMethod* method,
uint32_t dex_pc) const {
std::shared_ptr<std::list<InstrumentationListener*>> original(dex_pc_listeners_);
for (InstrumentationListener* listener : *original.get()) {
@@ -929,7 +904,7 @@ void Instrumentation::DexPcMovedEventImpl(Thread* thread, mirror::Object* this_o
}
}
-void Instrumentation::BackwardBranchImpl(Thread* thread, mirror::ArtMethod* method,
+void Instrumentation::BackwardBranchImpl(Thread* thread, ArtMethod* method,
int32_t offset) const {
for (InstrumentationListener* listener : backward_branch_listeners_) {
listener->BackwardBranch(thread, method, offset);
@@ -937,7 +912,7 @@ void Instrumentation::BackwardBranchImpl(Thread* thread, mirror::ArtMethod* meth
}
void Instrumentation::FieldReadEventImpl(Thread* thread, mirror::Object* this_object,
- mirror::ArtMethod* method, uint32_t dex_pc,
+ ArtMethod* method, uint32_t dex_pc,
ArtField* field) const {
std::shared_ptr<std::list<InstrumentationListener*>> original(field_read_listeners_);
for (InstrumentationListener* listener : *original.get()) {
@@ -946,7 +921,7 @@ void Instrumentation::FieldReadEventImpl(Thread* thread, mirror::Object* this_ob
}
void Instrumentation::FieldWriteEventImpl(Thread* thread, mirror::Object* this_object,
- mirror::ArtMethod* method, uint32_t dex_pc,
+ ArtMethod* method, uint32_t dex_pc,
ArtField* field, const JValue& field_value) const {
std::shared_ptr<std::list<InstrumentationListener*>> original(field_write_listeners_);
for (InstrumentationListener* listener : *original.get()) {
@@ -980,7 +955,7 @@ static void CheckStackDepth(Thread* self, const InstrumentationStackFrame& instr
}
void Instrumentation::PushInstrumentationStackFrame(Thread* self, mirror::Object* this_object,
- mirror::ArtMethod* method,
+ ArtMethod* method,
uintptr_t lr, bool interpreter_entry) {
// We have a callee-save frame meaning this value is guaranteed to never be 0.
size_t frame_id = StackVisitor::ComputeNumFrames(self, kInstrumentationStackWalk);
@@ -1011,7 +986,7 @@ TwoWordReturn Instrumentation::PopInstrumentationStackFrame(Thread* self, uintpt
CheckStackDepth(self, instrumentation_frame, 0);
self->VerifyStack();
- mirror::ArtMethod* method = instrumentation_frame.method_;
+ ArtMethod* method = instrumentation_frame.method_;
uint32_t length;
char return_shorty = method->GetShorty(&length)[0];
JValue return_value;
@@ -1064,7 +1039,7 @@ void Instrumentation::PopMethodForUnwind(Thread* self, bool is_deoptimization) c
// TODO: bring back CheckStackDepth(self, instrumentation_frame, 2);
stack->pop_front();
- mirror::ArtMethod* method = instrumentation_frame.method_;
+ ArtMethod* method = instrumentation_frame.method_;
if (is_deoptimization) {
if (kVerboseInstrumentation) {
LOG(INFO) << "Popping for deoptimization " << PrettyMethod(method);
@@ -1082,17 +1057,6 @@ void Instrumentation::PopMethodForUnwind(Thread* self, bool is_deoptimization) c
}
}
-void Instrumentation::VisitRoots(RootVisitor* visitor) {
- WriterMutexLock mu(Thread::Current(), deoptimized_methods_lock_);
- if (IsDeoptimizedMethodsEmpty()) {
- return;
- }
- BufferedRootVisitor<kDefaultBufferedRootCount> roots(visitor, RootInfo(kRootVMInternal));
- for (auto pair : deoptimized_methods_) {
- roots.VisitRoot(pair.second);
- }
-}
-
std::string InstrumentationStackFrame::Dump() const {
std::ostringstream os;
os << "Frame " << frame_id_ << " " << PrettyMethod(method_) << ":"
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index 7d70d21..db8e9c2 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -19,7 +19,7 @@
#include <stdint.h>
#include <list>
-#include <map>
+#include <unordered_set>
#include "arch/instruction_set.h"
#include "base/macros.h"
@@ -29,12 +29,12 @@
namespace art {
namespace mirror {
- class ArtMethod;
class Class;
class Object;
class Throwable;
} // namespace mirror
class ArtField;
+class ArtMethod;
union JValue;
class Thread;
@@ -62,32 +62,32 @@ struct InstrumentationListener {
// Call-back for when a method is entered.
virtual void MethodEntered(Thread* thread, mirror::Object* this_object,
- mirror::ArtMethod* method,
+ ArtMethod* method,
uint32_t dex_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
// Call-back for when a method is exited.
virtual void MethodExited(Thread* thread, mirror::Object* this_object,
- mirror::ArtMethod* method, uint32_t dex_pc,
+ ArtMethod* method, uint32_t dex_pc,
const JValue& return_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
// Call-back for when a method is popped due to an exception throw. A method will either cause a
// MethodExited call-back or a MethodUnwind call-back when its activation is removed.
virtual void MethodUnwind(Thread* thread, mirror::Object* this_object,
- mirror::ArtMethod* method, uint32_t dex_pc)
+ ArtMethod* method, uint32_t dex_pc)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
// Call-back for when the dex pc moves in a method.
virtual void DexPcMoved(Thread* thread, mirror::Object* this_object,
- mirror::ArtMethod* method, uint32_t new_dex_pc)
+ ArtMethod* method, uint32_t new_dex_pc)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
// Call-back for when we read from a field.
- virtual void FieldRead(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
+ virtual void FieldRead(Thread* thread, mirror::Object* this_object, ArtMethod* method,
uint32_t dex_pc, ArtField* field) = 0;
// Call-back for when we write into a field.
- virtual void FieldWritten(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
+ virtual void FieldWritten(Thread* thread, mirror::Object* this_object, ArtMethod* method,
uint32_t dex_pc, ArtField* field, const JValue& field_value) = 0;
// Call-back when an exception is caught.
@@ -95,7 +95,7 @@ struct InstrumentationListener {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
// Call-back for when we get a backward branch.
- virtual void BackwardBranch(Thread* thread, mirror::ArtMethod* method, int32_t dex_pc_offset)
+ virtual void BackwardBranch(Thread* thread, ArtMethod* method, int32_t dex_pc_offset)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
};
@@ -162,19 +162,19 @@ class Instrumentation {
// Deoptimize a method by forcing its execution with the interpreter. Nevertheless, a static
// method (except a class initializer) set to the resolution trampoline will be deoptimized only
// once its declaring class is initialized.
- void Deoptimize(mirror::ArtMethod* method)
+ void Deoptimize(ArtMethod* method)
LOCKS_EXCLUDED(Locks::thread_list_lock_, deoptimized_methods_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
// Undeoptimze the method by restoring its entrypoints. Nevertheless, a static method
// (except a class initializer) set to the resolution trampoline will be updated only once its
// declaring class is initialized.
- void Undeoptimize(mirror::ArtMethod* method)
+ void Undeoptimize(ArtMethod* method)
LOCKS_EXCLUDED(Locks::thread_list_lock_, deoptimized_methods_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
// Indicates whether the method has been deoptimized so it is executed with the interpreter.
- bool IsDeoptimized(mirror::ArtMethod* method)
+ bool IsDeoptimized(ArtMethod* method)
LOCKS_EXCLUDED(deoptimized_methods_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -205,13 +205,13 @@ class Instrumentation {
void ResetQuickAllocEntryPoints() EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_);
// Update the code of a method respecting any installed stubs.
- void UpdateMethodsCode(mirror::ArtMethod* method, const void* quick_code)
+ void UpdateMethodsCode(ArtMethod* method, const void* quick_code)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Get the quick code for the given method. More efficient than asking the class linker as it
// will short-cut to GetCode if instrumentation and static method resolution stubs aren't
// installed.
- const void* GetQuickCodeFor(mirror::ArtMethod* method, size_t pointer_size) const
+ const void* GetQuickCodeFor(ArtMethod* method, size_t pointer_size) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void ForceInterpretOnly() {
@@ -273,7 +273,7 @@ class Instrumentation {
// Inform listeners that a method has been entered. A dex PC is provided as we may install
// listeners into executing code and get method enter events for methods already on the stack.
void MethodEnterEvent(Thread* thread, mirror::Object* this_object,
- mirror::ArtMethod* method, uint32_t dex_pc) const
+ ArtMethod* method, uint32_t dex_pc) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (UNLIKELY(HasMethodEntryListeners())) {
MethodEnterEventImpl(thread, this_object, method, dex_pc);
@@ -282,7 +282,7 @@ class Instrumentation {
// Inform listeners that a method has been exited.
void MethodExitEvent(Thread* thread, mirror::Object* this_object,
- mirror::ArtMethod* method, uint32_t dex_pc,
+ ArtMethod* method, uint32_t dex_pc,
const JValue& return_value) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (UNLIKELY(HasMethodExitListeners())) {
@@ -292,12 +292,12 @@ class Instrumentation {
// Inform listeners that a method has been exited due to an exception.
void MethodUnwindEvent(Thread* thread, mirror::Object* this_object,
- mirror::ArtMethod* method, uint32_t dex_pc) const
+ ArtMethod* method, uint32_t dex_pc) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Inform listeners that the dex pc has moved (only supported by the interpreter).
void DexPcMovedEvent(Thread* thread, mirror::Object* this_object,
- mirror::ArtMethod* method, uint32_t dex_pc) const
+ ArtMethod* method, uint32_t dex_pc) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (UNLIKELY(HasDexPcListeners())) {
DexPcMovedEventImpl(thread, this_object, method, dex_pc);
@@ -305,7 +305,7 @@ class Instrumentation {
}
// Inform listeners that a backward branch has been taken (only supported by the interpreter).
- void BackwardBranch(Thread* thread, mirror::ArtMethod* method, int32_t offset) const
+ void BackwardBranch(Thread* thread, ArtMethod* method, int32_t offset) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (UNLIKELY(HasBackwardBranchListeners())) {
BackwardBranchImpl(thread, method, offset);
@@ -314,7 +314,7 @@ class Instrumentation {
// Inform listeners that we read a field (only supported by the interpreter).
void FieldReadEvent(Thread* thread, mirror::Object* this_object,
- mirror::ArtMethod* method, uint32_t dex_pc,
+ ArtMethod* method, uint32_t dex_pc,
ArtField* field) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (UNLIKELY(HasFieldReadListeners())) {
@@ -324,7 +324,7 @@ class Instrumentation {
// Inform listeners that we write a field (only supported by the interpreter).
void FieldWriteEvent(Thread* thread, mirror::Object* this_object,
- mirror::ArtMethod* method, uint32_t dex_pc,
+ ArtMethod* method, uint32_t dex_pc,
ArtField* field, const JValue& field_value) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (UNLIKELY(HasFieldWriteListeners())) {
@@ -339,7 +339,7 @@ class Instrumentation {
// Called when an instrumented method is entered. The intended link register (lr) is saved so
// that returning causes a branch to the method exit stub. Generates method enter events.
void PushInstrumentationStackFrame(Thread* self, mirror::Object* this_object,
- mirror::ArtMethod* method, uintptr_t lr,
+ ArtMethod* method, uintptr_t lr,
bool interpreter_entry)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -356,12 +356,9 @@ class Instrumentation {
// Call back for configure stubs.
void InstallStubsForClass(mirror::Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void InstallStubsForMethod(mirror::ArtMethod* method)
+ void InstallStubsForMethod(ArtMethod* method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(deoptimized_methods_lock_);
-
private:
InstrumentationLevel GetCurrentInstrumentationLevel() const;
@@ -384,42 +381,39 @@ class Instrumentation {
void SetEntrypointsInstrumented(bool instrumented) NO_THREAD_SAFETY_ANALYSIS;
void MethodEnterEventImpl(Thread* thread, mirror::Object* this_object,
- mirror::ArtMethod* method, uint32_t dex_pc) const
+ ArtMethod* method, uint32_t dex_pc) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void MethodExitEventImpl(Thread* thread, mirror::Object* this_object,
- mirror::ArtMethod* method,
+ ArtMethod* method,
uint32_t dex_pc, const JValue& return_value) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void DexPcMovedEventImpl(Thread* thread, mirror::Object* this_object,
- mirror::ArtMethod* method, uint32_t dex_pc) const
+ ArtMethod* method, uint32_t dex_pc) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void BackwardBranchImpl(Thread* thread, mirror::ArtMethod* method, int32_t offset) const
+ void BackwardBranchImpl(Thread* thread, ArtMethod* method, int32_t offset) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void FieldReadEventImpl(Thread* thread, mirror::Object* this_object,
- mirror::ArtMethod* method, uint32_t dex_pc,
+ ArtMethod* method, uint32_t dex_pc,
ArtField* field) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void FieldWriteEventImpl(Thread* thread, mirror::Object* this_object,
- mirror::ArtMethod* method, uint32_t dex_pc,
+ ArtMethod* method, uint32_t dex_pc,
ArtField* field, const JValue& field_value) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Read barrier-aware utility functions for accessing deoptimized_methods_
- bool AddDeoptimizedMethod(mirror::ArtMethod* method)
+ bool AddDeoptimizedMethod(ArtMethod* method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(deoptimized_methods_lock_);
- bool FindDeoptimizedMethod(mirror::ArtMethod* method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- SHARED_LOCKS_REQUIRED(deoptimized_methods_lock_);
- bool RemoveDeoptimizedMethod(mirror::ArtMethod* method)
+ bool IsDeoptimizedMethod(ArtMethod* method)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, deoptimized_methods_lock_);
+ bool RemoveDeoptimizedMethod(ArtMethod* method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(deoptimized_methods_lock_);
- mirror::ArtMethod* BeginDeoptimizedMethod()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- SHARED_LOCKS_REQUIRED(deoptimized_methods_lock_);
+ ArtMethod* BeginDeoptimizedMethod()
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, deoptimized_methods_lock_);
bool IsDeoptimizedMethodsEmpty() const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- SHARED_LOCKS_REQUIRED(deoptimized_methods_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, deoptimized_methods_lock_);
// Have we hijacked ArtMethod::code_ so that it calls instrumentation/interpreter code?
bool instrumentation_stubs_installed_;
@@ -488,8 +482,7 @@ class Instrumentation {
// The set of methods being deoptimized (by the debugger) which must be executed with interpreter
// only.
mutable ReaderWriterMutex deoptimized_methods_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
- std::multimap<int32_t, GcRoot<mirror::ArtMethod>> deoptimized_methods_
- GUARDED_BY(deoptimized_methods_lock_);
+ std::unordered_set<ArtMethod*> deoptimized_methods_ GUARDED_BY(deoptimized_methods_lock_);
bool deoptimization_enabled_;
// Current interpreter handler table. This is updated each time the thread state flags are
@@ -509,7 +502,7 @@ std::ostream& operator<<(std::ostream& os, const Instrumentation::Instrumentatio
// An element in the instrumentation side stack maintained in art::Thread.
struct InstrumentationStackFrame {
- InstrumentationStackFrame(mirror::Object* this_object, mirror::ArtMethod* method,
+ InstrumentationStackFrame(mirror::Object* this_object, ArtMethod* method,
uintptr_t return_pc, size_t frame_id, bool interpreter_entry)
: this_object_(this_object), method_(method), return_pc_(return_pc), frame_id_(frame_id),
interpreter_entry_(interpreter_entry) {
@@ -518,7 +511,7 @@ struct InstrumentationStackFrame {
std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
mirror::Object* this_object_;
- mirror::ArtMethod* method_;
+ ArtMethod* method_;
uintptr_t return_pc_;
size_t frame_id_;
bool interpreter_entry_;
diff --git a/runtime/instrumentation_test.cc b/runtime/instrumentation_test.cc
index 5afacb8..85bb8c4 100644
--- a/runtime/instrumentation_test.cc
+++ b/runtime/instrumentation_test.cc
@@ -42,7 +42,7 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio
void MethodEntered(Thread* thread ATTRIBUTE_UNUSED,
mirror::Object* this_object ATTRIBUTE_UNUSED,
- mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED)
OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
received_method_enter_event = true;
@@ -50,7 +50,7 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio
void MethodExited(Thread* thread ATTRIBUTE_UNUSED,
mirror::Object* this_object ATTRIBUTE_UNUSED,
- mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED,
const JValue& return_value ATTRIBUTE_UNUSED)
OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -59,7 +59,7 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio
void MethodUnwind(Thread* thread ATTRIBUTE_UNUSED,
mirror::Object* this_object ATTRIBUTE_UNUSED,
- mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED)
OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
received_method_unwind_event = true;
@@ -67,7 +67,7 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio
void DexPcMoved(Thread* thread ATTRIBUTE_UNUSED,
mirror::Object* this_object ATTRIBUTE_UNUSED,
- mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t new_dex_pc ATTRIBUTE_UNUSED)
OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
received_dex_pc_moved_event = true;
@@ -75,7 +75,7 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio
void FieldRead(Thread* thread ATTRIBUTE_UNUSED,
mirror::Object* this_object ATTRIBUTE_UNUSED,
- mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED,
ArtField* field ATTRIBUTE_UNUSED)
OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -84,7 +84,7 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio
void FieldWritten(Thread* thread ATTRIBUTE_UNUSED,
mirror::Object* this_object ATTRIBUTE_UNUSED,
- mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED,
ArtField* field ATTRIBUTE_UNUSED,
const JValue& field_value ATTRIBUTE_UNUSED)
@@ -99,7 +99,7 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio
}
void BackwardBranch(Thread* thread ATTRIBUTE_UNUSED,
- mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ ArtMethod* method ATTRIBUTE_UNUSED,
int32_t dex_pc_offset ATTRIBUTE_UNUSED)
OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
received_backward_branch_event = true;
@@ -170,7 +170,7 @@ class InstrumentationTest : public CommonRuntimeTest {
soa.Self()->TransitionFromSuspendedToRunnable();
}
- mirror::ArtMethod* const event_method = nullptr;
+ ArtMethod* const event_method = nullptr;
mirror::Object* const event_obj = nullptr;
const uint32_t event_dex_pc = 0;
@@ -197,8 +197,7 @@ class InstrumentationTest : public CommonRuntimeTest {
EXPECT_FALSE(DidListenerReceiveEvent(listener, instrumentation_event));
}
- void DeoptimizeMethod(Thread* self, Handle<mirror::ArtMethod> method,
- bool enable_deoptimization)
+ void DeoptimizeMethod(Thread* self, ArtMethod* method, bool enable_deoptimization)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Runtime* runtime = Runtime::Current();
instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation();
@@ -207,19 +206,19 @@ class InstrumentationTest : public CommonRuntimeTest {
if (enable_deoptimization) {
instrumentation->EnableDeoptimization();
}
- instrumentation->Deoptimize(method.Get());
+ instrumentation->Deoptimize(method);
runtime->GetThreadList()->ResumeAll();
self->TransitionFromSuspendedToRunnable();
}
- void UndeoptimizeMethod(Thread* self, Handle<mirror::ArtMethod> method,
+ void UndeoptimizeMethod(Thread* self, ArtMethod* method,
const char* key, bool disable_deoptimization)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Runtime* runtime = Runtime::Current();
instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation();
self->TransitionFromRunnableToSuspended(kSuspended);
runtime->GetThreadList()->SuspendAll("Single method undeoptimization");
- instrumentation->Undeoptimize(method.Get());
+ instrumentation->Undeoptimize(method);
if (disable_deoptimization) {
instrumentation->DisableDeoptimization(key);
}
@@ -304,7 +303,7 @@ class InstrumentationTest : public CommonRuntimeTest {
}
static void ReportEvent(const instrumentation::Instrumentation* instr, uint32_t event_type,
- Thread* self, mirror::ArtMethod* method, mirror::Object* obj,
+ Thread* self, ArtMethod* method, mirror::Object* obj,
uint32_t dex_pc)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
switch (event_type) {
@@ -434,28 +433,28 @@ TEST_F(InstrumentationTest, DeoptimizeDirectMethod) {
Runtime* const runtime = Runtime::Current();
instrumentation::Instrumentation* instr = runtime->GetInstrumentation();
ClassLinker* class_linker = runtime->GetClassLinker();
- StackHandleScope<2> hs(soa.Self());
+ StackHandleScope<1> hs(soa.Self());
Handle<mirror::ClassLoader> loader(hs.NewHandle(soa.Decode<mirror::ClassLoader*>(class_loader)));
mirror::Class* klass = class_linker->FindClass(soa.Self(), "LInstrumentation;", loader);
ASSERT_TRUE(klass != nullptr);
- Handle<mirror::ArtMethod> method_to_deoptimize(
- hs.NewHandle(klass->FindDeclaredDirectMethod("instanceMethod", "()V")));
- ASSERT_TRUE(method_to_deoptimize.Get() != nullptr);
+ ArtMethod* method_to_deoptimize = klass->FindDeclaredDirectMethod("instanceMethod", "()V",
+ sizeof(void*));
+ ASSERT_TRUE(method_to_deoptimize != nullptr);
EXPECT_FALSE(instr->AreAllMethodsDeoptimized());
- EXPECT_FALSE(instr->IsDeoptimized(method_to_deoptimize.Get()));
+ EXPECT_FALSE(instr->IsDeoptimized(method_to_deoptimize));
DeoptimizeMethod(soa.Self(), method_to_deoptimize, true);
EXPECT_FALSE(instr->AreAllMethodsDeoptimized());
EXPECT_TRUE(instr->AreExitStubsInstalled());
- EXPECT_TRUE(instr->IsDeoptimized(method_to_deoptimize.Get()));
+ EXPECT_TRUE(instr->IsDeoptimized(method_to_deoptimize));
constexpr const char* instrumentation_key = "DeoptimizeDirectMethod";
UndeoptimizeMethod(soa.Self(), method_to_deoptimize, instrumentation_key, true);
EXPECT_FALSE(instr->AreAllMethodsDeoptimized());
- EXPECT_FALSE(instr->IsDeoptimized(method_to_deoptimize.Get()));
+ EXPECT_FALSE(instr->IsDeoptimized(method_to_deoptimize));
}
TEST_F(InstrumentationTest, FullDeoptimization) {
@@ -481,16 +480,16 @@ TEST_F(InstrumentationTest, MixedDeoptimization) {
Runtime* const runtime = Runtime::Current();
instrumentation::Instrumentation* instr = runtime->GetInstrumentation();
ClassLinker* class_linker = runtime->GetClassLinker();
- StackHandleScope<2> hs(soa.Self());
+ StackHandleScope<1> hs(soa.Self());
Handle<mirror::ClassLoader> loader(hs.NewHandle(soa.Decode<mirror::ClassLoader*>(class_loader)));
mirror::Class* klass = class_linker->FindClass(soa.Self(), "LInstrumentation;", loader);
ASSERT_TRUE(klass != nullptr);
- Handle<mirror::ArtMethod> method_to_deoptimize(
- hs.NewHandle(klass->FindDeclaredDirectMethod("instanceMethod", "()V")));
- ASSERT_TRUE(method_to_deoptimize.Get() != nullptr);
+ ArtMethod* method_to_deoptimize = klass->FindDeclaredDirectMethod("instanceMethod", "()V",
+ sizeof(void*));
+ ASSERT_TRUE(method_to_deoptimize != nullptr);
EXPECT_FALSE(instr->AreAllMethodsDeoptimized());
- EXPECT_FALSE(instr->IsDeoptimized(method_to_deoptimize.Get()));
+ EXPECT_FALSE(instr->IsDeoptimized(method_to_deoptimize));
DeoptimizeMethod(soa.Self(), method_to_deoptimize, true);
// Deoptimizing a method does not change instrumentation level.
@@ -498,7 +497,7 @@ TEST_F(InstrumentationTest, MixedDeoptimization) {
GetCurrentInstrumentationLevel());
EXPECT_FALSE(instr->AreAllMethodsDeoptimized());
EXPECT_TRUE(instr->AreExitStubsInstalled());
- EXPECT_TRUE(instr->IsDeoptimized(method_to_deoptimize.Get()));
+ EXPECT_TRUE(instr->IsDeoptimized(method_to_deoptimize));
constexpr const char* instrumentation_key = "MixedDeoptimization";
DeoptimizeEverything(soa.Self(), instrumentation_key, false);
@@ -506,20 +505,20 @@ TEST_F(InstrumentationTest, MixedDeoptimization) {
GetCurrentInstrumentationLevel());
EXPECT_TRUE(instr->AreAllMethodsDeoptimized());
EXPECT_TRUE(instr->AreExitStubsInstalled());
- EXPECT_TRUE(instr->IsDeoptimized(method_to_deoptimize.Get()));
+ EXPECT_TRUE(instr->IsDeoptimized(method_to_deoptimize));
UndeoptimizeEverything(soa.Self(), instrumentation_key, false);
EXPECT_EQ(Instrumentation::InstrumentationLevel::kInstrumentNothing,
GetCurrentInstrumentationLevel());
EXPECT_FALSE(instr->AreAllMethodsDeoptimized());
EXPECT_TRUE(instr->AreExitStubsInstalled());
- EXPECT_TRUE(instr->IsDeoptimized(method_to_deoptimize.Get()));
+ EXPECT_TRUE(instr->IsDeoptimized(method_to_deoptimize));
UndeoptimizeMethod(soa.Self(), method_to_deoptimize, instrumentation_key, true);
EXPECT_EQ(Instrumentation::InstrumentationLevel::kInstrumentNothing,
GetCurrentInstrumentationLevel());
EXPECT_FALSE(instr->AreAllMethodsDeoptimized());
- EXPECT_FALSE(instr->IsDeoptimized(method_to_deoptimize.Get()));
+ EXPECT_FALSE(instr->IsDeoptimized(method_to_deoptimize));
}
TEST_F(InstrumentationTest, MethodTracing_Interpreter) {
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index a85d10f..9abbca8 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -18,6 +18,7 @@
#include <memory>
+#include "gc_root-inl.h"
#include "gc/space/image_space.h"
#include "mirror/dex_cache.h"
#include "mirror/object_array-inl.h"
diff --git a/runtime/interpreter/interpreter.h b/runtime/interpreter/interpreter.h
index 7d634b3..446c5bb 100644
--- a/runtime/interpreter/interpreter.h
+++ b/runtime/interpreter/interpreter.h
@@ -22,10 +22,10 @@
namespace art {
namespace mirror {
-class ArtMethod;
class Object;
} // namespace mirror
+class ArtMethod;
union JValue;
class ShadowFrame;
class Thread;
@@ -33,7 +33,7 @@ class Thread;
namespace interpreter {
// Called by ArtMethod::Invoke, shadow frames arguments are taken from the args array.
-extern void EnterInterpreterFromInvoke(Thread* self, mirror::ArtMethod* method,
+extern void EnterInterpreterFromInvoke(Thread* self, ArtMethod* method,
mirror::Object* receiver, uint32_t* args, JValue* result)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 363c65a..1ed1a64 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -413,25 +413,19 @@ EXPLICIT_DO_IPUT_QUICK_ALL_TEMPLATE_DECL(Primitive::kPrimNot) // iput-objec
#undef EXPLICIT_DO_IPUT_QUICK_ALL_TEMPLATE_DECL
#undef EXPLICIT_DO_IPUT_QUICK_TEMPLATE_DECL
-uint32_t FindNextInstructionFollowingException(Thread* self,
- ShadowFrame& shadow_frame,
- uint32_t dex_pc,
- const instrumentation::Instrumentation* instrumentation) {
+uint32_t FindNextInstructionFollowingException(
+ Thread* self, ShadowFrame& shadow_frame, uint32_t dex_pc,
+ const instrumentation::Instrumentation* instrumentation) {
self->VerifyStack();
- StackHandleScope<3> hs(self);
+ StackHandleScope<2> hs(self);
Handle<mirror::Throwable> exception(hs.NewHandle(self->GetException()));
if (instrumentation->HasExceptionCaughtListeners()
&& self->IsExceptionThrownByCurrentMethod(exception.Get())) {
instrumentation->ExceptionCaughtEvent(self, exception.Get());
}
bool clear_exception = false;
- uint32_t found_dex_pc;
- {
- Handle<mirror::Class> exception_class(hs.NewHandle(exception->GetClass()));
- Handle<mirror::ArtMethod> h_method(hs.NewHandle(shadow_frame.GetMethod()));
- found_dex_pc = mirror::ArtMethod::FindCatchBlock(h_method, exception_class, dex_pc,
- &clear_exception);
- }
+ uint32_t found_dex_pc = shadow_frame.GetMethod()->FindCatchBlock(
+ hs.NewHandle(exception->GetClass()), dex_pc, &clear_exception);
if (found_dex_pc == DexFile::kDexNoIndex) {
// Exception is not caught by the current method. We will unwind to the
// caller. Notify any instrumentation listener.
@@ -651,7 +645,7 @@ bool DoCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_frame,
UNREACHABLE();
}
// Force the use of interpreter when it is required by the debugger.
- mirror::EntryPointFromInterpreter* entry;
+ EntryPointFromInterpreter* entry;
if (UNLIKELY(Dbg::IsForcedInterpreterNeededForCalling(self, new_shadow_frame->GetMethod()))) {
entry = &art::artInterpreterToInterpreterBridge;
} else {
@@ -668,7 +662,7 @@ bool DoCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_frame,
shadow_frame.SetVRegReference(vregC, result->GetL());
// Overwrite all potential copies of the original result of the new-instance of string with the
// new result of the StringFactory. Use the verifier to find this set of registers.
- mirror::ArtMethod* method = shadow_frame.GetMethod();
+ ArtMethod* method = shadow_frame.GetMethod();
MethodReference method_ref = method->ToMethodReference();
SafeMap<uint32_t, std::set<uint32_t>> string_init_map;
SafeMap<uint32_t, std::set<uint32_t>>* string_init_map_ptr;
@@ -788,13 +782,17 @@ void RecordArrayElementsInTransaction(mirror::Array* array, int32_t count)
RecordArrayElementsInTransactionImpl(array->AsShortArray(), count);
break;
case Primitive::kPrimInt:
- case Primitive::kPrimFloat:
RecordArrayElementsInTransactionImpl(array->AsIntArray(), count);
break;
+ case Primitive::kPrimFloat:
+ RecordArrayElementsInTransactionImpl(array->AsFloatArray(), count);
+ break;
case Primitive::kPrimLong:
- case Primitive::kPrimDouble:
RecordArrayElementsInTransactionImpl(array->AsLongArray(), count);
break;
+ case Primitive::kPrimDouble:
+ RecordArrayElementsInTransactionImpl(array->AsDoubleArray(), count);
+ break;
default:
LOG(FATAL) << "Unsupported primitive type " << primitive_component_type
<< " in fill-array-data";
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 6acc72e..6fafcd1 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -25,6 +25,7 @@
#include <sstream>
#include "art_field-inl.h"
+#include "art_method-inl.h"
#include "base/logging.h"
#include "base/macros.h"
#include "class_linker-inl.h"
@@ -33,7 +34,6 @@
#include "dex_instruction-inl.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "handle_scope-inl.h"
-#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
@@ -41,7 +41,7 @@
#include "thread.h"
#include "well_known_classes.h"
-using ::art::mirror::ArtMethod;
+using ::art::ArtMethod;
using ::art::mirror::Array;
using ::art::mirror::BooleanArray;
using ::art::mirror::ByteArray;
@@ -105,7 +105,7 @@ static inline bool DoInvoke(Thread* self, ShadowFrame& shadow_frame, const Instr
const uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
const uint32_t vregC = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
Object* receiver = (type == kStatic) ? nullptr : shadow_frame.GetVRegReference(vregC);
- mirror::ArtMethod* sf_method = shadow_frame.GetMethod();
+ ArtMethod* sf_method = shadow_frame.GetMethod();
ArtMethod* const called_method = FindMethodFromCode<type, do_access_check>(
method_idx, &receiver, &sf_method, self);
// The shadow frame should already be pushed, so we don't need to update it.
@@ -139,7 +139,8 @@ static inline bool DoInvokeVirtualQuick(Thread* self, ShadowFrame& shadow_frame,
}
const uint32_t vtable_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
CHECK(receiver->GetClass()->ShouldHaveEmbeddedImtAndVTable());
- ArtMethod* const called_method = receiver->GetClass()->GetEmbeddedVTableEntry(vtable_idx);
+ ArtMethod* const called_method = receiver->GetClass()->GetEmbeddedVTableEntry(
+ vtable_idx, sizeof(void*));
if (UNLIKELY(called_method == nullptr)) {
CHECK(self->IsExceptionPending());
result->SetJ(0);
@@ -184,7 +185,6 @@ bool DoIPutQuick(const ShadowFrame& shadow_frame, const Instruction* inst, uint1
// java.lang.String class is initialized.
static inline String* ResolveString(Thread* self, ShadowFrame& shadow_frame, uint32_t string_idx)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- CHECK(!kMovingMethods);
Class* java_lang_string_class = String::GetJavaLangString();
if (UNLIKELY(!java_lang_string_class->IsInitialized())) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
@@ -195,7 +195,7 @@ static inline String* ResolveString(Thread* self, ShadowFrame& shadow_frame, uin
return nullptr;
}
}
- mirror::ArtMethod* method = shadow_frame.GetMethod();
+ ArtMethod* method = shadow_frame.GetMethod();
mirror::Class* declaring_class = method->GetDeclaringClass();
mirror::String* s = declaring_class->GetDexCacheStrings()->Get(string_idx);
if (UNLIKELY(s == nullptr)) {
diff --git a/runtime/interpreter/interpreter_goto_table_impl.cc b/runtime/interpreter/interpreter_goto_table_impl.cc
index dd1f55e..86027c5 100644
--- a/runtime/interpreter/interpreter_goto_table_impl.cc
+++ b/runtime/interpreter/interpreter_goto_table_impl.cc
@@ -1042,7 +1042,8 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_PENDING_EXCEPTION();
} else {
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- IntArray* array = a->AsIntArray();
+ DCHECK(a->IsIntArray() || a->IsFloatArray()) << PrettyTypeOf(a);
+ auto* array = down_cast<IntArray*>(a);
if (LIKELY(array->CheckIsValidIndex(index))) {
shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
ADVANCE(2);
@@ -1060,7 +1061,8 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_PENDING_EXCEPTION();
} else {
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- LongArray* array = a->AsLongArray();
+ DCHECK(a->IsLongArray() || a->IsDoubleArray()) << PrettyTypeOf(a);
+ auto* array = down_cast<LongArray*>(a);
if (LIKELY(array->CheckIsValidIndex(index))) {
shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
ADVANCE(2);
@@ -1173,7 +1175,8 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
} else {
int32_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- IntArray* array = a->AsIntArray();
+ DCHECK(a->IsIntArray() || a->IsFloatArray()) << PrettyTypeOf(a);
+ auto* array = down_cast<IntArray*>(a);
if (LIKELY(array->CheckIsValidIndex(index))) {
array->SetWithoutChecks<transaction_active>(index, val);
ADVANCE(2);
@@ -1192,7 +1195,8 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
} else {
int64_t val = shadow_frame.GetVRegLong(inst->VRegA_23x(inst_data));
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- LongArray* array = a->AsLongArray();
+ DCHECK(a->IsLongArray() || a->IsDoubleArray()) << PrettyTypeOf(a);
+ auto* array = down_cast<LongArray*>(a);
if (LIKELY(array->CheckIsValidIndex(index))) {
array->SetWithoutChecks<transaction_active>(index, val);
ADVANCE(2);
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index 0e3420f..dd7aa40 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -888,7 +888,8 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
break;
}
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- IntArray* array = a->AsIntArray();
+ DCHECK(a->IsIntArray() || a->IsFloatArray()) << PrettyTypeOf(a);
+ auto* array = down_cast<IntArray*>(a);
if (array->CheckIsValidIndex(index)) {
shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
inst = inst->Next_2xx();
@@ -906,7 +907,8 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
break;
}
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- LongArray* array = a->AsLongArray();
+ DCHECK(a->IsLongArray() || a->IsDoubleArray()) << PrettyTypeOf(a);
+ auto* array = down_cast<LongArray*>(a);
if (array->CheckIsValidIndex(index)) {
shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
inst = inst->Next_2xx();
@@ -1019,7 +1021,8 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
}
int32_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- IntArray* array = a->AsIntArray();
+ DCHECK(a->IsIntArray() || a->IsFloatArray()) << PrettyTypeOf(a);
+ auto* array = down_cast<IntArray*>(a);
if (array->CheckIsValidIndex(index)) {
array->SetWithoutChecks<transaction_active>(index, val);
inst = inst->Next_2xx();
@@ -1038,7 +1041,8 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
}
int64_t val = shadow_frame.GetVRegLong(inst->VRegA_23x(inst_data));
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- LongArray* array = a->AsLongArray();
+ DCHECK(a->IsLongArray() || a->IsDoubleArray()) << PrettyTypeOf(a);
+ LongArray* array = down_cast<LongArray*>(a);
if (array->CheckIsValidIndex(index)) {
array->SetWithoutChecks<transaction_active>(index, val);
inst = inst->Next_2xx();
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 738e52b..43e24fa 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -21,6 +21,7 @@
#include "ScopedLocalRef.h"
+#include "art_method-inl.h"
#include "base/logging.h"
#include "base/macros.h"
#include "class_linker.h"
@@ -29,7 +30,6 @@
#include "handle_scope-inl.h"
#include "interpreter/interpreter_common.h"
#include "mirror/array-inl.h"
-#include "mirror/art_method-inl.h"
#include "mirror/class.h"
#include "mirror/field-inl.h"
#include "mirror/object-inl.h"
@@ -121,8 +121,7 @@ static mirror::String* GetClassName(Thread* self, ShadowFrame* shadow_frame, siz
}
void UnstartedRuntime::UnstartedClassForName(
- Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
mirror::String* class_name = GetClassName(self, shadow_frame, arg_offset);
if (class_name == nullptr) {
return;
@@ -135,8 +134,7 @@ void UnstartedRuntime::UnstartedClassForName(
}
void UnstartedRuntime::UnstartedClassForNameLong(
- Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
mirror::String* class_name = GetClassName(self, shadow_frame, arg_offset);
if (class_name == nullptr) {
return;
@@ -153,8 +151,7 @@ void UnstartedRuntime::UnstartedClassForNameLong(
}
void UnstartedRuntime::UnstartedClassClassForName(
- Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
mirror::String* class_name = GetClassName(self, shadow_frame, arg_offset);
if (class_name == nullptr) {
return;
@@ -171,9 +168,8 @@ void UnstartedRuntime::UnstartedClassClassForName(
}
void UnstartedRuntime::UnstartedClassNewInstance(
- Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- StackHandleScope<3> hs(self); // Class, constructor, object.
+ Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
+ StackHandleScope<2> hs(self); // Class, constructor, object.
mirror::Object* param = shadow_frame->GetVRegReference(arg_offset);
if (param == nullptr) {
AbortTransactionOrFail(self, "Null-pointer in Class.newInstance.");
@@ -202,13 +198,13 @@ void UnstartedRuntime::UnstartedClassNewInstance(
// 2) If we can't find the default constructor. We'll postpone the exception to runtime.
// Note that 2) could likely be handled here, but for safety abort the transaction.
bool ok = false;
- if (Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_klass, true, true)) {
- Handle<mirror::ArtMethod> h_cons(hs.NewHandle(
- h_klass->FindDeclaredDirectMethod("<init>", "()V")));
- if (h_cons.Get() != nullptr) {
+ auto* cl = Runtime::Current()->GetClassLinker();
+ if (cl->EnsureInitialized(self, h_klass, true, true)) {
+ auto* cons = h_klass->FindDeclaredDirectMethod("<init>", "()V", cl->GetImagePointerSize());
+ if (cons != nullptr) {
Handle<mirror::Object> h_obj(hs.NewHandle(klass->AllocObject(self)));
CHECK(h_obj.Get() != nullptr); // We don't expect OOM at compile-time.
- EnterInterpreterFromInvoke(self, h_cons.Get(), h_obj.Get(), nullptr, nullptr);
+ EnterInterpreterFromInvoke(self, cons, h_obj.Get(), nullptr, nullptr);
if (!self->IsExceptionPending()) {
result->SetL(h_obj.Get());
ok = true;
@@ -227,8 +223,7 @@ void UnstartedRuntime::UnstartedClassNewInstance(
}
void UnstartedRuntime::UnstartedClassGetDeclaredField(
- Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
// Special managed code cut-out to allow field lookup in a un-started runtime that'd fail
// going the reflective Dex way.
mirror::Class* klass = shadow_frame->GetVRegReference(arg_offset)->AsClass();
@@ -266,8 +261,7 @@ void UnstartedRuntime::UnstartedClassGetDeclaredField(
}
void UnstartedRuntime::UnstartedVmClassLoaderFindLoadedClass(
- Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
mirror::String* class_name = shadow_frame->GetVRegReference(arg_offset + 1)->AsString();
mirror::ClassLoader* class_loader =
down_cast<mirror::ClassLoader*>(shadow_frame->GetVRegReference(arg_offset));
@@ -286,11 +280,9 @@ void UnstartedRuntime::UnstartedVmClassLoaderFindLoadedClass(
}
}
-void UnstartedRuntime::UnstartedVoidLookupType(Thread* self ATTRIBUTE_UNUSED,
- ShadowFrame* shadow_frame ATTRIBUTE_UNUSED,
- JValue* result,
- size_t arg_offset ATTRIBUTE_UNUSED)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+void UnstartedRuntime::UnstartedVoidLookupType(
+ Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame ATTRIBUTE_UNUSED, JValue* result,
+ size_t arg_offset ATTRIBUTE_UNUSED) {
result->SetL(Runtime::Current()->GetClassLinker()->FindPrimitiveClass('V'));
}
@@ -324,8 +316,7 @@ static void PrimitiveArrayCopy(Thread* self,
}
void UnstartedRuntime::UnstartedSystemArraycopy(
- Thread* self, ShadowFrame* shadow_frame, JValue* result ATTRIBUTE_UNUSED, size_t arg_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Thread* self, ShadowFrame* shadow_frame, JValue* result ATTRIBUTE_UNUSED, size_t arg_offset) {
// Special case array copying without initializing System.
jint src_pos = shadow_frame->GetVReg(arg_offset + 1);
jint dst_pos = shadow_frame->GetVReg(arg_offset + 3);
@@ -410,22 +401,19 @@ void UnstartedRuntime::UnstartedSystemArraycopy(
}
void UnstartedRuntime::UnstartedSystemArraycopyChar(
- Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
// Just forward.
UnstartedRuntime::UnstartedSystemArraycopy(self, shadow_frame, result, arg_offset);
}
void UnstartedRuntime::UnstartedSystemArraycopyInt(
- Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
// Just forward.
UnstartedRuntime::UnstartedSystemArraycopy(self, shadow_frame, result, arg_offset);
}
void UnstartedRuntime::UnstartedThreadLocalGet(
- Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset ATTRIBUTE_UNUSED)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset ATTRIBUTE_UNUSED) {
std::string caller(PrettyMethod(shadow_frame->GetLink()->GetMethod()));
bool ok = false;
if (caller == "java.lang.String java.lang.IntegralToString.convertInt"
@@ -450,8 +438,9 @@ void UnstartedRuntime::UnstartedThreadLocalGet(
Handle<mirror::Object> h_real_to_string_obj(hs.NewHandle(
h_real_to_string_class->AllocObject(self)));
if (h_real_to_string_obj.Get() != nullptr) {
- mirror::ArtMethod* init_method =
- h_real_to_string_class->FindDirectMethod("<init>", "()V");
+ auto* cl = Runtime::Current()->GetClassLinker();
+ ArtMethod* init_method = h_real_to_string_class->FindDirectMethod(
+ "<init>", "()V", cl->GetImagePointerSize());
if (init_method == nullptr) {
h_real_to_string_class->DumpClass(LOG(FATAL), mirror::Class::kDumpClassFullDetail);
} else {
@@ -488,16 +477,8 @@ void UnstartedRuntime::UnstartedMathCeil(
result->SetD(out);
}
-void UnstartedRuntime::UnstartedArtMethodGetMethodName(
- Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod* method = shadow_frame->GetVRegReference(arg_offset)->AsArtMethod();
- result->SetL(method->GetNameAsString(self));
-}
-
void UnstartedRuntime::UnstartedObjectHashCode(
- Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
mirror::Object* obj = shadow_frame->GetVRegReference(arg_offset);
result->SetI(obj->IdentityHashCode());
}
@@ -537,8 +518,7 @@ static mirror::Object* GetDexFromDexCache(Thread* self, mirror::DexCache* dex_ca
}
void UnstartedRuntime::UnstartedDexCacheGetDexNative(
- Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
// We will create the Dex object, but the image writer will release it before creating the
// art file.
mirror::Object* src = shadow_frame->GetVRegReference(arg_offset);
@@ -600,26 +580,22 @@ static void UnstartedMemoryPeek(
}
void UnstartedRuntime::UnstartedMemoryPeekByte(
- Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
UnstartedMemoryPeek(Primitive::kPrimByte, shadow_frame, result, arg_offset);
}
void UnstartedRuntime::UnstartedMemoryPeekShort(
- Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
UnstartedMemoryPeek(Primitive::kPrimShort, shadow_frame, result, arg_offset);
}
void UnstartedRuntime::UnstartedMemoryPeekInt(
- Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
UnstartedMemoryPeek(Primitive::kPrimInt, shadow_frame, result, arg_offset);
}
void UnstartedRuntime::UnstartedMemoryPeekLong(
- Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
UnstartedMemoryPeek(Primitive::kPrimLong, shadow_frame, result, arg_offset);
}
@@ -673,18 +649,14 @@ static void UnstartedMemoryPeekArray(
}
void UnstartedRuntime::UnstartedMemoryPeekByteArray(
- Thread* self, ShadowFrame* shadow_frame, JValue* result ATTRIBUTE_UNUSED, size_t arg_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Thread* self, ShadowFrame* shadow_frame, JValue* result ATTRIBUTE_UNUSED, size_t arg_offset) {
UnstartedMemoryPeekArray(Primitive::kPrimByte, self, shadow_frame, arg_offset);
}
// This allows reading security.properties in an unstarted runtime and initialize Security.
void UnstartedRuntime::UnstartedSecurityGetSecurityPropertiesReader(
- Thread* self,
- ShadowFrame* shadow_frame ATTRIBUTE_UNUSED,
- JValue* result,
- size_t arg_offset ATTRIBUTE_UNUSED)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Thread* self, ShadowFrame* shadow_frame ATTRIBUTE_UNUSED, JValue* result,
+ size_t arg_offset ATTRIBUTE_UNUSED) {
Runtime* runtime = Runtime::Current();
const std::vector<const DexFile*>& path = runtime->GetClassLinker()->GetBootClassPath();
std::string canonical(DexFile::GetDexCanonicalLocation(path[0]->GetLocation().c_str()));
@@ -753,8 +725,9 @@ void UnstartedRuntime::UnstartedSecurityGetSecurityPropertiesReader(
return;
}
- mirror::ArtMethod* constructor = h_class->FindDeclaredDirectMethod("<init>",
- "(Ljava/lang/String;)V");
+ auto* cl = Runtime::Current()->GetClassLinker();
+ ArtMethod* constructor = h_class->FindDeclaredDirectMethod(
+ "<init>", "(Ljava/lang/String;)V", cl->GetImagePointerSize());
if (constructor == nullptr) {
AbortTransactionOrFail(self, "Could not find StringReader constructor");
return;
@@ -774,8 +747,7 @@ void UnstartedRuntime::UnstartedSecurityGetSecurityPropertiesReader(
// This allows reading the new style of String objects during compilation.
void UnstartedRuntime::UnstartedStringGetCharsNoCheck(
- Thread* self, ShadowFrame* shadow_frame, JValue* result ATTRIBUTE_UNUSED, size_t arg_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Thread* self, ShadowFrame* shadow_frame, JValue* result ATTRIBUTE_UNUSED, size_t arg_offset) {
jint start = shadow_frame->GetVReg(arg_offset + 1);
jint end = shadow_frame->GetVReg(arg_offset + 2);
jint index = shadow_frame->GetVReg(arg_offset + 4);
@@ -787,7 +759,8 @@ void UnstartedRuntime::UnstartedStringGetCharsNoCheck(
DCHECK_GE(start, 0);
DCHECK_GE(end, string->GetLength());
StackHandleScope<1> hs(self);
- Handle<mirror::CharArray> h_char_array(hs.NewHandle(shadow_frame->GetVRegReference(arg_offset + 3)->AsCharArray()));
+ Handle<mirror::CharArray> h_char_array(
+ hs.NewHandle(shadow_frame->GetVRegReference(arg_offset + 3)->AsCharArray()));
DCHECK_LE(index, h_char_array->GetLength());
DCHECK_LE(end - start, h_char_array->GetLength() - index);
string->GetChars(start, end, h_char_array, index);
@@ -795,8 +768,7 @@ void UnstartedRuntime::UnstartedStringGetCharsNoCheck(
// This allows reading chars from the new style of String objects during compilation.
void UnstartedRuntime::UnstartedStringCharAt(
- Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
jint index = shadow_frame->GetVReg(arg_offset + 1);
mirror::String* string = shadow_frame->GetVRegReference(arg_offset)->AsString();
if (string == nullptr) {
@@ -808,8 +780,7 @@ void UnstartedRuntime::UnstartedStringCharAt(
// This allows setting chars from the new style of String objects during compilation.
void UnstartedRuntime::UnstartedStringSetCharAt(
- Thread* self, ShadowFrame* shadow_frame, JValue* result ATTRIBUTE_UNUSED, size_t arg_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Thread* self, ShadowFrame* shadow_frame, JValue* result ATTRIBUTE_UNUSED, size_t arg_offset) {
jint index = shadow_frame->GetVReg(arg_offset + 1);
jchar c = shadow_frame->GetVReg(arg_offset + 2);
mirror::String* string = shadow_frame->GetVRegReference(arg_offset)->AsString();
@@ -822,13 +793,13 @@ void UnstartedRuntime::UnstartedStringSetCharAt(
// This allows creating the new style of String objects during compilation.
void UnstartedRuntime::UnstartedStringFactoryNewStringFromChars(
- Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
jint offset = shadow_frame->GetVReg(arg_offset);
jint char_count = shadow_frame->GetVReg(arg_offset + 1);
DCHECK_GE(char_count, 0);
StackHandleScope<1> hs(self);
- Handle<mirror::CharArray> h_char_array(hs.NewHandle(shadow_frame->GetVRegReference(arg_offset + 2)->AsCharArray()));
+ Handle<mirror::CharArray> h_char_array(
+ hs.NewHandle(shadow_frame->GetVRegReference(arg_offset + 2)->AsCharArray()));
Runtime* runtime = Runtime::Current();
gc::AllocatorType allocator = runtime->GetHeap()->GetCurrentAllocator();
result->SetL(mirror::String::AllocFromCharArray<true>(self, char_count, h_char_array, offset, allocator));
@@ -836,8 +807,7 @@ void UnstartedRuntime::UnstartedStringFactoryNewStringFromChars(
// This allows creating the new style of String objects during compilation.
void UnstartedRuntime::UnstartedStringFactoryNewStringFromString(
- Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
mirror::String* to_copy = shadow_frame->GetVRegReference(arg_offset)->AsString();
if (to_copy == nullptr) {
AbortTransactionOrFail(self, "StringFactory.newStringFromString with null object");
@@ -852,14 +822,14 @@ void UnstartedRuntime::UnstartedStringFactoryNewStringFromString(
}
void UnstartedRuntime::UnstartedStringFastSubstring(
- Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
jint start = shadow_frame->GetVReg(arg_offset + 1);
jint length = shadow_frame->GetVReg(arg_offset + 2);
DCHECK_GE(start, 0);
DCHECK_GE(length, 0);
StackHandleScope<1> hs(self);
- Handle<mirror::String> h_string(hs.NewHandle(shadow_frame->GetVRegReference(arg_offset)->AsString()));
+ Handle<mirror::String> h_string(
+ hs.NewHandle(shadow_frame->GetVRegReference(arg_offset)->AsString()));
DCHECK_LE(start, h_string->GetLength());
DCHECK_LE(start + length, h_string->GetLength());
Runtime* runtime = Runtime::Current();
@@ -879,12 +849,9 @@ void UnstartedRuntime::UnstartedStringToCharArray(
result->SetL(string->ToCharArray(self));
}
-void UnstartedRuntime::UnstartedJNIVMRuntimeNewUnpaddedArray(Thread* self,
- mirror::ArtMethod* method ATTRIBUTE_UNUSED,
- mirror::Object* receiver ATTRIBUTE_UNUSED,
- uint32_t* args,
- JValue* result)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+void UnstartedRuntime::UnstartedJNIVMRuntimeNewUnpaddedArray(
+ Thread* self, ArtMethod* method ATTRIBUTE_UNUSED, mirror::Object* receiver ATTRIBUTE_UNUSED,
+ uint32_t* args, JValue* result) {
int32_t length = args[1];
DCHECK_GE(length, 0);
mirror::Class* element_class = reinterpret_cast<mirror::Object*>(args[0])->AsClass();
@@ -896,20 +863,15 @@ void UnstartedRuntime::UnstartedJNIVMRuntimeNewUnpaddedArray(Thread* self,
array_class->GetComponentSizeShift(), allocator));
}
-void UnstartedRuntime::UnstartedJNIVMStackGetCallingClassLoader(Thread* self ATTRIBUTE_UNUSED,
- mirror::ArtMethod* method ATTRIBUTE_UNUSED,
- mirror::Object* receiver ATTRIBUTE_UNUSED,
- uint32_t* args ATTRIBUTE_UNUSED,
- JValue* result) {
+void UnstartedRuntime::UnstartedJNIVMStackGetCallingClassLoader(
+ Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver ATTRIBUTE_UNUSED, uint32_t* args ATTRIBUTE_UNUSED, JValue* result) {
result->SetL(nullptr);
}
-void UnstartedRuntime::UnstartedJNIVMStackGetStackClass2(Thread* self,
- mirror::ArtMethod* method ATTRIBUTE_UNUSED,
- mirror::Object* receiver ATTRIBUTE_UNUSED,
- uint32_t* args ATTRIBUTE_UNUSED,
- JValue* result)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+void UnstartedRuntime::UnstartedJNIVMStackGetStackClass2(
+ Thread* self, ArtMethod* method ATTRIBUTE_UNUSED, mirror::Object* receiver ATTRIBUTE_UNUSED,
+ uint32_t* args ATTRIBUTE_UNUSED, JValue* result) {
NthCallerVisitor visitor(self, 3);
visitor.WalkStack();
if (visitor.caller != nullptr) {
@@ -917,76 +879,56 @@ void UnstartedRuntime::UnstartedJNIVMStackGetStackClass2(Thread* self,
}
}
-void UnstartedRuntime::UnstartedJNIMathLog(Thread* self ATTRIBUTE_UNUSED,
- mirror::ArtMethod* method ATTRIBUTE_UNUSED,
- mirror::Object* receiver ATTRIBUTE_UNUSED,
- uint32_t* args,
- JValue* result) {
+void UnstartedRuntime::UnstartedJNIMathLog(
+ Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver ATTRIBUTE_UNUSED, uint32_t* args, JValue* result) {
JValue value;
value.SetJ((static_cast<uint64_t>(args[1]) << 32) | args[0]);
result->SetD(log(value.GetD()));
}
-void UnstartedRuntime::UnstartedJNIMathExp(Thread* self ATTRIBUTE_UNUSED,
- mirror::ArtMethod* method ATTRIBUTE_UNUSED,
- mirror::Object* receiver ATTRIBUTE_UNUSED,
- uint32_t* args,
- JValue* result) {
+void UnstartedRuntime::UnstartedJNIMathExp(
+ Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver ATTRIBUTE_UNUSED, uint32_t* args, JValue* result) {
JValue value;
value.SetJ((static_cast<uint64_t>(args[1]) << 32) | args[0]);
result->SetD(exp(value.GetD()));
}
-void UnstartedRuntime::UnstartedJNIClassGetNameNative(Thread* self,
- mirror::ArtMethod* method ATTRIBUTE_UNUSED,
- mirror::Object* receiver,
- uint32_t* args ATTRIBUTE_UNUSED,
- JValue* result)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+void UnstartedRuntime::UnstartedJNIClassGetNameNative(
+ Thread* self, ArtMethod* method ATTRIBUTE_UNUSED, mirror::Object* receiver,
+ uint32_t* args ATTRIBUTE_UNUSED, JValue* result) {
StackHandleScope<1> hs(self);
result->SetL(mirror::Class::ComputeName(hs.NewHandle(receiver->AsClass())));
}
-void UnstartedRuntime::UnstartedJNIFloatFloatToRawIntBits(Thread* self ATTRIBUTE_UNUSED,
- mirror::ArtMethod* method ATTRIBUTE_UNUSED,
- mirror::Object* receiver ATTRIBUTE_UNUSED,
- uint32_t* args,
- JValue* result) {
+void UnstartedRuntime::UnstartedJNIFloatFloatToRawIntBits(
+ Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver ATTRIBUTE_UNUSED, uint32_t* args, JValue* result) {
result->SetI(args[0]);
}
-void UnstartedRuntime::UnstartedJNIFloatIntBitsToFloat(Thread* self ATTRIBUTE_UNUSED,
- mirror::ArtMethod* method ATTRIBUTE_UNUSED,
- mirror::Object* receiver ATTRIBUTE_UNUSED,
- uint32_t* args,
- JValue* result) {
+void UnstartedRuntime::UnstartedJNIFloatIntBitsToFloat(
+ Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver ATTRIBUTE_UNUSED, uint32_t* args, JValue* result) {
result->SetI(args[0]);
}
-void UnstartedRuntime::UnstartedJNIObjectInternalClone(Thread* self,
- mirror::ArtMethod* method ATTRIBUTE_UNUSED,
- mirror::Object* receiver,
- uint32_t* args ATTRIBUTE_UNUSED,
- JValue* result)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+void UnstartedRuntime::UnstartedJNIObjectInternalClone(
+ Thread* self, ArtMethod* method ATTRIBUTE_UNUSED, mirror::Object* receiver,
+ uint32_t* args ATTRIBUTE_UNUSED, JValue* result) {
result->SetL(receiver->Clone(self));
}
-void UnstartedRuntime::UnstartedJNIObjectNotifyAll(Thread* self,
- mirror::ArtMethod* method ATTRIBUTE_UNUSED,
- mirror::Object* receiver,
- uint32_t* args ATTRIBUTE_UNUSED,
- JValue* result ATTRIBUTE_UNUSED)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+void UnstartedRuntime::UnstartedJNIObjectNotifyAll(
+ Thread* self, ArtMethod* method ATTRIBUTE_UNUSED, mirror::Object* receiver,
+ uint32_t* args ATTRIBUTE_UNUSED, JValue* result ATTRIBUTE_UNUSED) {
receiver->NotifyAll(self);
}
-void UnstartedRuntime::UnstartedJNIStringCompareTo(Thread* self,
- mirror::ArtMethod* method ATTRIBUTE_UNUSED,
- mirror::Object* receiver,
- uint32_t* args,
- JValue* result)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+void UnstartedRuntime::UnstartedJNIStringCompareTo(
+ Thread* self, ArtMethod* method ATTRIBUTE_UNUSED, mirror::Object* receiver, uint32_t* args,
+ JValue* result) {
mirror::String* rhs = reinterpret_cast<mirror::Object*>(args[0])->AsString();
if (rhs == nullptr) {
AbortTransactionOrFail(self, "String.compareTo with null object");
@@ -994,42 +936,30 @@ void UnstartedRuntime::UnstartedJNIStringCompareTo(Thread* self,
result->SetI(receiver->AsString()->CompareTo(rhs));
}
-void UnstartedRuntime::UnstartedJNIStringIntern(Thread* self ATTRIBUTE_UNUSED,
- mirror::ArtMethod* method ATTRIBUTE_UNUSED,
- mirror::Object* receiver,
- uint32_t* args ATTRIBUTE_UNUSED,
- JValue* result)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+void UnstartedRuntime::UnstartedJNIStringIntern(
+ Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED, mirror::Object* receiver,
+ uint32_t* args ATTRIBUTE_UNUSED, JValue* result) {
result->SetL(receiver->AsString()->Intern());
}
-void UnstartedRuntime::UnstartedJNIStringFastIndexOf(Thread* self ATTRIBUTE_UNUSED,
- mirror::ArtMethod* method ATTRIBUTE_UNUSED,
- mirror::Object* receiver,
- uint32_t* args,
- JValue* result)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+void UnstartedRuntime::UnstartedJNIStringFastIndexOf(
+ Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED, mirror::Object* receiver,
+ uint32_t* args, JValue* result) {
result->SetI(receiver->AsString()->FastIndexOf(args[0], args[1]));
}
-void UnstartedRuntime::UnstartedJNIArrayCreateMultiArray(Thread* self,
- mirror::ArtMethod* method ATTRIBUTE_UNUSED,
- mirror::Object* receiver ATTRIBUTE_UNUSED,
- uint32_t* args,
- JValue* result)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+void UnstartedRuntime::UnstartedJNIArrayCreateMultiArray(
+ Thread* self, ArtMethod* method ATTRIBUTE_UNUSED, mirror::Object* receiver ATTRIBUTE_UNUSED,
+ uint32_t* args, JValue* result) {
StackHandleScope<2> hs(self);
auto h_class(hs.NewHandle(reinterpret_cast<mirror::Class*>(args[0])->AsClass()));
auto h_dimensions(hs.NewHandle(reinterpret_cast<mirror::IntArray*>(args[1])->AsIntArray()));
result->SetL(mirror::Array::CreateMultiArray(self, h_class, h_dimensions));
}
-void UnstartedRuntime::UnstartedJNIArrayCreateObjectArray(Thread* self,
- mirror::ArtMethod* method ATTRIBUTE_UNUSED,
- mirror::Object* receiver ATTRIBUTE_UNUSED,
- uint32_t* args,
- JValue* result)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+void UnstartedRuntime::UnstartedJNIArrayCreateObjectArray(
+ Thread* self, ArtMethod* method ATTRIBUTE_UNUSED, mirror::Object* receiver ATTRIBUTE_UNUSED,
+ uint32_t* args, JValue* result) {
int32_t length = static_cast<int32_t>(args[1]);
if (length < 0) {
ThrowNegativeArraySizeException(length);
@@ -1049,12 +979,9 @@ void UnstartedRuntime::UnstartedJNIArrayCreateObjectArray(Thread* self,
result->SetL(new_array);
}
-void UnstartedRuntime::UnstartedJNIThrowableNativeFillInStackTrace(Thread* self,
- mirror::ArtMethod* method ATTRIBUTE_UNUSED,
- mirror::Object* receiver ATTRIBUTE_UNUSED,
- uint32_t* args ATTRIBUTE_UNUSED,
- JValue* result)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+void UnstartedRuntime::UnstartedJNIThrowableNativeFillInStackTrace(
+ Thread* self, ArtMethod* method ATTRIBUTE_UNUSED, mirror::Object* receiver ATTRIBUTE_UNUSED,
+ uint32_t* args ATTRIBUTE_UNUSED, JValue* result) {
ScopedObjectAccessUnchecked soa(self);
if (Runtime::Current()->IsActiveTransaction()) {
result->SetL(soa.Decode<mirror::Object*>(self->CreateInternalStackTrace<true>(soa)));
@@ -1063,30 +990,22 @@ void UnstartedRuntime::UnstartedJNIThrowableNativeFillInStackTrace(Thread* self,
}
}
-void UnstartedRuntime::UnstartedJNISystemIdentityHashCode(Thread* self ATTRIBUTE_UNUSED,
- mirror::ArtMethod* method ATTRIBUTE_UNUSED,
- mirror::Object* receiver ATTRIBUTE_UNUSED,
- uint32_t* args,
- JValue* result)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+void UnstartedRuntime::UnstartedJNISystemIdentityHashCode(
+ Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver ATTRIBUTE_UNUSED, uint32_t* args, JValue* result) {
mirror::Object* obj = reinterpret_cast<mirror::Object*>(args[0]);
result->SetI((obj != nullptr) ? obj->IdentityHashCode() : 0);
}
-void UnstartedRuntime::UnstartedJNIByteOrderIsLittleEndian(Thread* self ATTRIBUTE_UNUSED,
- mirror::ArtMethod* method ATTRIBUTE_UNUSED,
- mirror::Object* receiver ATTRIBUTE_UNUSED,
- uint32_t* args ATTRIBUTE_UNUSED,
- JValue* result) {
+void UnstartedRuntime::UnstartedJNIByteOrderIsLittleEndian(
+ Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver ATTRIBUTE_UNUSED, uint32_t* args ATTRIBUTE_UNUSED, JValue* result) {
result->SetZ(JNI_TRUE);
}
-void UnstartedRuntime::UnstartedJNIUnsafeCompareAndSwapInt(Thread* self ATTRIBUTE_UNUSED,
- mirror::ArtMethod* method ATTRIBUTE_UNUSED,
- mirror::Object* receiver ATTRIBUTE_UNUSED,
- uint32_t* args,
- JValue* result)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+void UnstartedRuntime::UnstartedJNIUnsafeCompareAndSwapInt(
+ Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver ATTRIBUTE_UNUSED, uint32_t* args, JValue* result) {
mirror::Object* obj = reinterpret_cast<mirror::Object*>(args[0]);
jlong offset = (static_cast<uint64_t>(args[2]) << 32) | args[1];
jint expectedValue = args[3];
@@ -1102,12 +1021,9 @@ void UnstartedRuntime::UnstartedJNIUnsafeCompareAndSwapInt(Thread* self ATTRIBUT
result->SetZ(success ? JNI_TRUE : JNI_FALSE);
}
-void UnstartedRuntime::UnstartedJNIUnsafePutObject(Thread* self ATTRIBUTE_UNUSED,
- mirror::ArtMethod* method ATTRIBUTE_UNUSED,
- mirror::Object* receiver ATTRIBUTE_UNUSED,
- uint32_t* args,
- JValue* result ATTRIBUTE_UNUSED)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+void UnstartedRuntime::UnstartedJNIUnsafePutObject(
+ Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver ATTRIBUTE_UNUSED, uint32_t* args, JValue* result ATTRIBUTE_UNUSED) {
mirror::Object* obj = reinterpret_cast<mirror::Object*>(args[0]);
jlong offset = (static_cast<uint64_t>(args[2]) << 32) | args[1];
mirror::Object* newValue = reinterpret_cast<mirror::Object*>(args[3]);
@@ -1119,24 +1035,16 @@ void UnstartedRuntime::UnstartedJNIUnsafePutObject(Thread* self ATTRIBUTE_UNUSED
}
void UnstartedRuntime::UnstartedJNIUnsafeGetArrayBaseOffsetForComponentType(
- Thread* self ATTRIBUTE_UNUSED,
- mirror::ArtMethod* method ATTRIBUTE_UNUSED,
- mirror::Object* receiver ATTRIBUTE_UNUSED,
- uint32_t* args,
- JValue* result)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver ATTRIBUTE_UNUSED, uint32_t* args, JValue* result) {
mirror::Class* component = reinterpret_cast<mirror::Object*>(args[0])->AsClass();
Primitive::Type primitive_type = component->GetPrimitiveType();
result->SetI(mirror::Array::DataOffset(Primitive::ComponentSize(primitive_type)).Int32Value());
}
void UnstartedRuntime::UnstartedJNIUnsafeGetArrayIndexScaleForComponentType(
- Thread* self ATTRIBUTE_UNUSED,
- mirror::ArtMethod* method ATTRIBUTE_UNUSED,
- mirror::Object* receiver ATTRIBUTE_UNUSED,
- uint32_t* args,
- JValue* result)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver ATTRIBUTE_UNUSED, uint32_t* args, JValue* result) {
mirror::Class* component = reinterpret_cast<mirror::Object*>(args[0])->AsClass();
Primitive::Type primitive_type = component->GetPrimitiveType();
result->SetI(Primitive::ComponentSize(primitive_type));
@@ -1145,7 +1053,7 @@ void UnstartedRuntime::UnstartedJNIUnsafeGetArrayIndexScaleForComponentType(
typedef void (*InvokeHandler)(Thread* self, ShadowFrame* shadow_frame, JValue* result,
size_t arg_size);
-typedef void (*JNIHandler)(Thread* self, mirror::ArtMethod* method, mirror::Object* receiver,
+typedef void (*JNIHandler)(Thread* self, ArtMethod* method, mirror::Object* receiver,
uint32_t* args, JValue* result);
static bool tables_initialized_ = false;
@@ -1200,7 +1108,7 @@ void UnstartedRuntime::Invoke(Thread* self, const DexFile::CodeItem* code_item,
}
// Hand select a number of methods to be run in a not yet started runtime without using JNI.
-void UnstartedRuntime::Jni(Thread* self, mirror::ArtMethod* method, mirror::Object* receiver,
+void UnstartedRuntime::Jni(Thread* self, ArtMethod* method, mirror::Object* receiver,
uint32_t* args, JValue* result) {
std::string name(PrettyMethod(method));
const auto& iter = jni_handlers_.find(name);
diff --git a/runtime/interpreter/unstarted_runtime.h b/runtime/interpreter/unstarted_runtime.h
index a361af0..a357d5f 100644
--- a/runtime/interpreter/unstarted_runtime.h
+++ b/runtime/interpreter/unstarted_runtime.h
@@ -24,14 +24,12 @@
namespace art {
+class ArtMethod;
class Thread;
class ShadowFrame;
namespace mirror {
-
-class ArtMethod;
class Object;
-
} // namespace mirror
namespace interpreter {
@@ -57,7 +55,7 @@ class UnstartedRuntime {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static void Jni(Thread* self,
- mirror::ArtMethod* method,
+ ArtMethod* method,
mirror::Object* receiver,
uint32_t* args,
JValue* result)
@@ -80,7 +78,7 @@ class UnstartedRuntime {
// Methods that are native.
#define UNSTARTED_JNI(ShortName, SigIgnored) \
static void UnstartedJNI ## ShortName(Thread* self, \
- mirror::ArtMethod* method, \
+ ArtMethod* method, \
mirror::Object* receiver, \
uint32_t* args, \
JValue* result) \
diff --git a/runtime/interpreter/unstarted_runtime_list.h b/runtime/interpreter/unstarted_runtime_list.h
index 8f6014c..047e906 100644
--- a/runtime/interpreter/unstarted_runtime_list.h
+++ b/runtime/interpreter/unstarted_runtime_list.h
@@ -31,7 +31,6 @@
V(SystemArraycopyInt, "void java.lang.System.arraycopy(int[], int, int[], int, int)") \
V(ThreadLocalGet, "java.lang.Object java.lang.ThreadLocal.get()") \
V(MathCeil, "double java.lang.Math.ceil(double)") \
- V(ArtMethodGetMethodName, "java.lang.String java.lang.reflect.ArtMethod.getMethodName(java.lang.reflect.ArtMethod)") \
V(ObjectHashCode, "int java.lang.Object.hashCode()") \
V(DoubleDoubleToRawLongBits, "long java.lang.Double.doubleToRawLongBits(double)") \
V(DexCacheGetDexNative, "com.android.dex.Dex java.lang.DexCache.getDexNative()") \
diff --git a/runtime/interpreter/unstarted_runtime_test.cc b/runtime/interpreter/unstarted_runtime_test.cc
index 9179d17..4b672e0 100644
--- a/runtime/interpreter/unstarted_runtime_test.cc
+++ b/runtime/interpreter/unstarted_runtime_test.cc
@@ -52,9 +52,9 @@ class UnstartedRuntimeTest : public CommonRuntimeTest {
#undef UNSTARTED_DIRECT
// Methods that are native.
-#define UNSTARTED_JNI(Name, SigIgnored) \
+#define UNSTARTED_JNI(Name, SigIgnored) \
static void UnstartedJNI ## Name(Thread* self, \
- mirror::ArtMethod* method, \
+ ArtMethod* method, \
mirror::Object* receiver, \
uint32_t* args, \
JValue* result) \
@@ -253,7 +253,8 @@ TEST_F(UnstartedRuntimeTest, StringInit) {
Thread* self = Thread::Current();
ScopedObjectAccess soa(self);
mirror::Class* klass = mirror::String::GetJavaLangString();
- mirror::ArtMethod* method = klass->FindDeclaredDirectMethod("<init>", "(Ljava/lang/String;)V");
+ ArtMethod* method = klass->FindDeclaredDirectMethod("<init>", "(Ljava/lang/String;)V",
+ sizeof(void*));
// create instruction data for invoke-direct {v0, v1} of method with fake index
uint16_t inst_data[3] = { 0x2070, 0x0000, 0x0010 };
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index 3142089..2d3d19c 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -18,6 +18,7 @@
#include <dlfcn.h>
+#include "art_method.h"
#include "base/dumpable.h"
#include "base/mutex.h"
#include "base/stl_util.h"
@@ -25,7 +26,6 @@
#include "dex_file-inl.h"
#include "fault_handler.h"
#include "indirect_reference_table-inl.h"
-#include "mirror/art_method.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "nativebridge/native_bridge.h"
@@ -205,7 +205,7 @@ class Libraries {
}
// See section 11.3 "Linking Native Methods" of the JNI spec.
- void* FindNativeMethod(mirror::ArtMethod* m, std::string& detail)
+ void* FindNativeMethod(ArtMethod* m, std::string& detail)
EXCLUSIVE_LOCKS_REQUIRED(Locks::jni_libraries_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
std::string jni_short_name(JniShortName(m));
@@ -386,7 +386,7 @@ JavaVMExt::~JavaVMExt() {
void JavaVMExt::JniAbort(const char* jni_function_name, const char* msg) {
Thread* self = Thread::Current();
ScopedObjectAccess soa(self);
- mirror::ArtMethod* current_method = self->GetCurrentMethod(nullptr);
+ ArtMethod* current_method = self->GetCurrentMethod(nullptr);
std::ostringstream os;
os << "JNI DETECTED ERROR IN APPLICATION: " << msg;
@@ -424,7 +424,7 @@ void JavaVMExt::JniAbortF(const char* jni_function_name, const char* fmt, ...) {
va_end(args);
}
-bool JavaVMExt::ShouldTrace(mirror::ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+bool JavaVMExt::ShouldTrace(ArtMethod* method) {
// Fast where no tracing is enabled.
if (trace_.empty() && !VLOG_IS_ON(third_party_jni)) {
return false;
@@ -737,7 +737,7 @@ bool JavaVMExt::LoadNativeLibrary(JNIEnv* env, const std::string& path, jobject
return was_successful;
}
-void* JavaVMExt::FindCodeForNativeMethod(mirror::ArtMethod* m) {
+void* JavaVMExt::FindCodeForNativeMethod(ArtMethod* m) {
CHECK(m->IsNative());
mirror::Class* c = m->GetDeclaringClass();
// If this is a static method, it could be called before the class has been initialized.
diff --git a/runtime/java_vm_ext.h b/runtime/java_vm_ext.h
index 594027c..4fdf45a 100644
--- a/runtime/java_vm_ext.h
+++ b/runtime/java_vm_ext.h
@@ -27,10 +27,10 @@
namespace art {
namespace mirror {
- class ArtMethod;
class Array;
} // namespace mirror
+class ArtMethod;
class Libraries;
class ParsedOptions;
class Runtime;
@@ -77,7 +77,7 @@ class JavaVMExt : public JavaVM {
// such as NewByteArray.
// If -verbose:third-party-jni is on, we want to log any JNI function calls
// made by a third-party native method.
- bool ShouldTrace(mirror::ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool ShouldTrace(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
/**
* Loads the given shared library. 'path' is an absolute pathname.
@@ -92,7 +92,7 @@ class JavaVMExt : public JavaVM {
* Returns a pointer to the code for the native method 'm', found
* using dlsym(3) on every native library that's been loaded so far.
*/
- void* FindCodeForNativeMethod(mirror::ArtMethod* m)
+ void* FindCodeForNativeMethod(ArtMethod* m)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void DumpForSigQuit(std::ostream& os)
diff --git a/runtime/jdwp/jdwp.h b/runtime/jdwp/jdwp.h
index 55441c9..e18d10f 100644
--- a/runtime/jdwp/jdwp.h
+++ b/runtime/jdwp/jdwp.h
@@ -34,11 +34,11 @@ struct iovec;
namespace art {
class ArtField;
+class ArtMethod;
union JValue;
class Thread;
namespace mirror {
- class ArtMethod;
class Class;
class Object;
class Throwable;
@@ -74,7 +74,7 @@ static inline void expandBufAddRefTypeId(ExpandBuf* pReply, RefTypeId id) { expa
static inline void expandBufAddFrameId(ExpandBuf* pReply, FrameId id) { expandBufAdd8BE(pReply, id); }
struct EventLocation {
- mirror::ArtMethod* method;
+ ArtMethod* method;
uint32_t dex_pc;
};
diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc
index ff75268..612af8b 100644
--- a/runtime/jdwp/jdwp_event.cc
+++ b/runtime/jdwp/jdwp_event.cc
@@ -22,6 +22,7 @@
#include <unistd.h>
#include "art_field-inl.h"
+#include "art_method-inl.h"
#include "base/logging.h"
#include "base/stringprintf.h"
#include "debugger.h"
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 5dc739e..bc9545b 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -18,11 +18,11 @@
#include <dlfcn.h>
+#include "art_method-inl.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "interpreter/interpreter.h"
#include "jit_code_cache.h"
#include "jit_instrumentation.h"
-#include "mirror/art_method-inl.h"
#include "runtime.h"
#include "runtime_options.h"
#include "thread_list.h"
@@ -100,7 +100,7 @@ bool Jit::LoadCompiler(std::string* error_msg) {
*error_msg = "JIT couldn't find jit_unload entry point";
return false;
}
- jit_compile_method_ = reinterpret_cast<bool (*)(void*, mirror::ArtMethod*, Thread*)>(
+ jit_compile_method_ = reinterpret_cast<bool (*)(void*, ArtMethod*, Thread*)>(
dlsym(jit_library_handle_, "jit_compile_method"));
if (jit_compile_method_ == nullptr) {
dlclose(jit_library_handle_);
@@ -126,7 +126,7 @@ bool Jit::LoadCompiler(std::string* error_msg) {
return true;
}
-bool Jit::CompileMethod(mirror::ArtMethod* method, Thread* self) {
+bool Jit::CompileMethod(ArtMethod* method, Thread* self) {
DCHECK(!method->IsRuntimeMethod());
if (Dbg::IsDebuggerActive() && Dbg::MethodHasAnyBreakpoints(method)) {
VLOG(jit) << "JIT not compiling " << PrettyMethod(method) << " due to breakpoint";
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index 8f92453..dbd8977 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -30,13 +30,10 @@
namespace art {
+class ArtMethod;
class CompilerCallbacks;
struct RuntimeArgumentMap;
-namespace mirror {
-class ArtMethod;
-} // namespace mirror
-
namespace jit {
class JitCodeCache;
@@ -50,7 +47,7 @@ class Jit {
virtual ~Jit();
static Jit* Create(JitOptions* options, std::string* error_msg);
- bool CompileMethod(mirror::ArtMethod* method, Thread* self)
+ bool CompileMethod(ArtMethod* method, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void CreateInstrumentationCache(size_t compile_threshold);
void CreateThreadPool();
@@ -79,7 +76,7 @@ class Jit {
void* jit_compiler_handle_;
void* (*jit_load_)(CompilerCallbacks**);
void (*jit_unload_)(void*);
- bool (*jit_compile_method_)(void*, mirror::ArtMethod*, Thread*);
+ bool (*jit_compile_method_)(void*, ArtMethod*, Thread*);
// Performance monitoring.
bool dump_info_on_shutdown_;
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 4d367e0..cd5f4cb 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -18,8 +18,8 @@
#include <sstream>
+#include "art_method-inl.h"
#include "mem_map.h"
-#include "mirror/art_method-inl.h"
#include "oat_file-inl.h"
namespace art {
@@ -58,7 +58,7 @@ JitCodeCache::JitCodeCache(MemMap* mem_map)
code_cache_end_ = mem_map->End();
}
-bool JitCodeCache::ContainsMethod(mirror::ArtMethod* method) const {
+bool JitCodeCache::ContainsMethod(ArtMethod* method) const {
return ContainsCodePtr(method->GetEntryPointFromQuickCompiledCode());
}
@@ -93,7 +93,7 @@ uint8_t* JitCodeCache::AddDataArray(Thread* self, const uint8_t* begin, const ui
return data_cache_ptr_ - size;
}
-const void* JitCodeCache::GetCodeFor(mirror::ArtMethod* method) {
+const void* JitCodeCache::GetCodeFor(ArtMethod* method) {
const void* code = method->GetEntryPointFromQuickCompiledCode();
if (ContainsCodePtr(code)) {
return code;
@@ -106,7 +106,7 @@ const void* JitCodeCache::GetCodeFor(mirror::ArtMethod* method) {
return nullptr;
}
-void JitCodeCache::SaveCompiledCode(mirror::ArtMethod* method, const void* old_code_ptr) {
+void JitCodeCache::SaveCompiledCode(ArtMethod* method, const void* old_code_ptr) {
DCHECK_EQ(method->GetEntryPointFromQuickCompiledCode(), old_code_ptr);
DCHECK(ContainsCodePtr(old_code_ptr)) << PrettyMethod(method) << " old_code_ptr="
<< old_code_ptr;
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 8b76647..c1ea921 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -31,13 +31,10 @@
namespace art {
+class ArtMethod;
class CompiledMethod;
class CompilerCallbacks;
-namespace mirror {
-class ArtMethod;
-} // namespcae mirror
-
namespace jit {
class JitInstrumentationCache;
@@ -80,7 +77,7 @@ class JitCodeCache {
}
// Return true if the code cache contains the code pointer which si the entrypoint of the method.
- bool ContainsMethod(mirror::ArtMethod* method) const
+ bool ContainsMethod(ArtMethod* method) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Return true if the code cache contains a code ptr.
@@ -95,12 +92,12 @@ class JitCodeCache {
LOCKS_EXCLUDED(lock_);
// Get code for a method, returns null if it is not in the jit cache.
- const void* GetCodeFor(mirror::ArtMethod* method)
+ const void* GetCodeFor(ArtMethod* method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_);
// Save the compiled code for a method so that GetCodeFor(method) will return old_code_ptr if the
// entrypoint isn't within the cache.
- void SaveCompiledCode(mirror::ArtMethod* method, const void* old_code_ptr)
+ void SaveCompiledCode(ArtMethod* method, const void* old_code_ptr)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_);
private:
@@ -125,10 +122,9 @@ class JitCodeCache {
const uint8_t* data_cache_begin_;
const uint8_t* data_cache_end_;
size_t num_methods_;
- // TODO: This relies on methods not moving.
// This map holds code for methods if they were deoptimized by the instrumentation stubs. This is
// required since we have to implement ClassLinker::GetQuickOatCodeFor for walking stacks.
- SafeMap<mirror::ArtMethod*, const void*> method_code_map_ GUARDED_BY(lock_);
+ SafeMap<ArtMethod*, const void*> method_code_map_ GUARDED_BY(lock_);
DISALLOW_IMPLICIT_CONSTRUCTORS(JitCodeCache);
};
diff --git a/runtime/jit/jit_code_cache_test.cc b/runtime/jit/jit_code_cache_test.cc
index afa5a3e..cd123b9 100644
--- a/runtime/jit/jit_code_cache_test.cc
+++ b/runtime/jit/jit_code_cache_test.cc
@@ -16,9 +16,9 @@
#include "common_runtime_test.h"
+#include "art_method-inl.h"
#include "class_linker.h"
#include "jit_code_cache.h"
-#include "mirror/art_method-inl.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
@@ -50,15 +50,15 @@ TEST_F(JitCodeCacheTest, TestCoverage) {
ASSERT_TRUE(code_cache->ContainsCodePtr(reserved_code));
ASSERT_EQ(code_cache->NumMethods(), 1u);
ClassLinker* const cl = Runtime::Current()->GetClassLinker();
- auto h_method = hs.NewHandle(cl->AllocArtMethod(soa.Self()));
- ASSERT_FALSE(code_cache->ContainsMethod(h_method.Get()));
- h_method->SetEntryPointFromQuickCompiledCode(reserved_code);
- ASSERT_TRUE(code_cache->ContainsMethod(h_method.Get()));
- ASSERT_EQ(code_cache->GetCodeFor(h_method.Get()), reserved_code);
+ auto* method = cl->AllocArtMethodArray(soa.Self(), 1);
+ ASSERT_FALSE(code_cache->ContainsMethod(method));
+ method->SetEntryPointFromQuickCompiledCode(reserved_code);
+ ASSERT_TRUE(code_cache->ContainsMethod(method));
+ ASSERT_EQ(code_cache->GetCodeFor(method), reserved_code);
// Save the code and then change it.
- code_cache->SaveCompiledCode(h_method.Get(), reserved_code);
- h_method->SetEntryPointFromQuickCompiledCode(nullptr);
- ASSERT_EQ(code_cache->GetCodeFor(h_method.Get()), reserved_code);
+ code_cache->SaveCompiledCode(method, reserved_code);
+ method->SetEntryPointFromQuickCompiledCode(nullptr);
+ ASSERT_EQ(code_cache->GetCodeFor(method), reserved_code);
const uint8_t data_arr[] = {1, 2, 3, 4, 5};
uint8_t* data_ptr = code_cache->AddDataArray(soa.Self(), data_arr, data_arr + sizeof(data_arr));
ASSERT_TRUE(data_ptr != nullptr);
@@ -76,7 +76,8 @@ TEST_F(JitCodeCacheTest, TestOverflow) {
size_t data_bytes = 0;
constexpr size_t kCodeArrSize = 4 * KB;
constexpr size_t kDataArrSize = 4 * KB;
- uint8_t data_arr[kDataArrSize] = {53};
+ uint8_t data_arr[kDataArrSize];
+ std::fill_n(data_arr, arraysize(data_arr), 53);
// Add code and data until we are full.
uint8_t* code_ptr = nullptr;
uint8_t* data_ptr = nullptr;
diff --git a/runtime/jit/jit_instrumentation.cc b/runtime/jit/jit_instrumentation.cc
index 3232674..1e56cdc 100644
--- a/runtime/jit/jit_instrumentation.cc
+++ b/runtime/jit/jit_instrumentation.cc
@@ -16,9 +16,9 @@
#include "jit_instrumentation.h"
+#include "art_method-inl.h"
#include "jit.h"
#include "jit_code_cache.h"
-#include "mirror/art_method-inl.h"
#include "scoped_thread_state_change.h"
namespace art {
@@ -26,7 +26,7 @@ namespace jit {
class JitCompileTask : public Task {
public:
- explicit JitCompileTask(mirror::ArtMethod* method, JitInstrumentationCache* cache)
+ explicit JitCompileTask(ArtMethod* method, JitInstrumentationCache* cache)
: method_(method), cache_(cache) {
}
@@ -45,7 +45,7 @@ class JitCompileTask : public Task {
}
private:
- mirror::ArtMethod* const method_;
+ ArtMethod* const method_;
JitInstrumentationCache* const cache_;
DISALLOW_IMPLICIT_CONSTRUCTORS(JitCompileTask);
@@ -63,7 +63,7 @@ void JitInstrumentationCache::DeleteThreadPool() {
thread_pool_.reset();
}
-void JitInstrumentationCache::SignalCompiled(Thread* self, mirror::ArtMethod* method) {
+void JitInstrumentationCache::SignalCompiled(Thread* self, ArtMethod* method) {
ScopedObjectAccessUnchecked soa(self);
jmethodID method_id = soa.EncodeMethod(method);
MutexLock mu(self, lock_);
@@ -73,7 +73,7 @@ void JitInstrumentationCache::SignalCompiled(Thread* self, mirror::ArtMethod* me
}
}
-void JitInstrumentationCache::AddSamples(Thread* self, mirror::ArtMethod* method, size_t count) {
+void JitInstrumentationCache::AddSamples(Thread* self, ArtMethod* method, size_t count) {
ScopedObjectAccessUnchecked soa(self);
// Since we don't have on-stack replacement, some methods can remain in the interpreter longer
// than we want resulting in samples even after the method is compiled.
@@ -101,11 +101,13 @@ void JitInstrumentationCache::AddSamples(Thread* self, mirror::ArtMethod* method
}
if (is_hot) {
if (thread_pool_.get() != nullptr) {
- thread_pool_->AddTask(self, new JitCompileTask(method->GetInterfaceMethodIfProxy(), this));
+ thread_pool_->AddTask(self, new JitCompileTask(
+ method->GetInterfaceMethodIfProxy(sizeof(void*)), this));
thread_pool_->StartWorkers(self);
} else {
VLOG(jit) << "Compiling hot method " << PrettyMethod(method);
- Runtime::Current()->GetJit()->CompileMethod(method->GetInterfaceMethodIfProxy(), self);
+ Runtime::Current()->GetJit()->CompileMethod(
+ method->GetInterfaceMethodIfProxy(sizeof(void*)), self);
}
}
}
diff --git a/runtime/jit/jit_instrumentation.h b/runtime/jit/jit_instrumentation.h
index 72acaef..27894eb 100644
--- a/runtime/jit/jit_instrumentation.h
+++ b/runtime/jit/jit_instrumentation.h
@@ -31,12 +31,12 @@
namespace art {
namespace mirror {
- class ArtMethod;
class Class;
class Object;
class Throwable;
} // namespace mirror
class ArtField;
+class ArtMethod;
union JValue;
class Thread;
@@ -46,9 +46,9 @@ namespace jit {
class JitInstrumentationCache {
public:
explicit JitInstrumentationCache(size_t hot_method_threshold);
- void AddSamples(Thread* self, mirror::ArtMethod* method, size_t samples)
+ void AddSamples(Thread* self, ArtMethod* method, size_t samples)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SignalCompiled(Thread* self, mirror::ArtMethod* method)
+ void SignalCompiled(Thread* self, ArtMethod* method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void CreateThreadPool();
void DeleteThreadPool();
@@ -67,31 +67,31 @@ class JitInstrumentationListener : public instrumentation::InstrumentationListen
explicit JitInstrumentationListener(JitInstrumentationCache* cache);
virtual void MethodEntered(Thread* thread, mirror::Object* /*this_object*/,
- mirror::ArtMethod* method, uint32_t /*dex_pc*/)
+ ArtMethod* method, uint32_t /*dex_pc*/)
OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
instrumentation_cache_->AddSamples(thread, method, 1);
}
virtual void MethodExited(Thread* /*thread*/, mirror::Object* /*this_object*/,
- mirror::ArtMethod* /*method*/, uint32_t /*dex_pc*/,
+ ArtMethod* /*method*/, uint32_t /*dex_pc*/,
const JValue& /*return_value*/)
OVERRIDE { }
virtual void MethodUnwind(Thread* /*thread*/, mirror::Object* /*this_object*/,
- mirror::ArtMethod* /*method*/, uint32_t /*dex_pc*/) OVERRIDE { }
+ ArtMethod* /*method*/, uint32_t /*dex_pc*/) OVERRIDE { }
virtual void FieldRead(Thread* /*thread*/, mirror::Object* /*this_object*/,
- mirror::ArtMethod* /*method*/, uint32_t /*dex_pc*/,
+ ArtMethod* /*method*/, uint32_t /*dex_pc*/,
ArtField* /*field*/) OVERRIDE { }
virtual void FieldWritten(Thread* /*thread*/, mirror::Object* /*this_object*/,
- mirror::ArtMethod* /*method*/, uint32_t /*dex_pc*/,
+ ArtMethod* /*method*/, uint32_t /*dex_pc*/,
ArtField* /*field*/, const JValue& /*field_value*/)
OVERRIDE { }
virtual void ExceptionCaught(Thread* /*thread*/,
mirror::Throwable* /*exception_object*/) OVERRIDE { }
virtual void DexPcMoved(Thread* /*self*/, mirror::Object* /*this_object*/,
- mirror::ArtMethod* /*method*/, uint32_t /*new_dex_pc*/) OVERRIDE { }
+ ArtMethod* /*method*/, uint32_t /*new_dex_pc*/) OVERRIDE { }
// We only care about how many dex instructions were executed in the Jit.
- virtual void BackwardBranch(Thread* thread, mirror::ArtMethod* method, int32_t dex_pc_offset)
+ virtual void BackwardBranch(Thread* thread, ArtMethod* method, int32_t dex_pc_offset)
OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
CHECK_LE(dex_pc_offset, 0);
instrumentation_cache_->AddSamples(thread, method, 1);
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index f435467..6ab4455 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -24,6 +24,7 @@
#include <vector>
#include "art_field-inl.h"
+#include "art_method-inl.h"
#include "atomic.h"
#include "base/allocator.h"
#include "base/logging.h"
@@ -38,7 +39,6 @@
#include "interpreter/interpreter.h"
#include "jni_env_ext.h"
#include "java_vm_ext.h"
-#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/field-inl.h"
@@ -126,17 +126,18 @@ static jmethodID FindMethodID(ScopedObjectAccess& soa, jclass jni_class,
if (c == nullptr) {
return nullptr;
}
- mirror::ArtMethod* method = nullptr;
+ ArtMethod* method = nullptr;
+ auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
if (is_static) {
- method = c->FindDirectMethod(name, sig);
+ method = c->FindDirectMethod(name, sig, pointer_size);
} else if (c->IsInterface()) {
- method = c->FindInterfaceMethod(name, sig);
+ method = c->FindInterfaceMethod(name, sig, pointer_size);
} else {
- method = c->FindVirtualMethod(name, sig);
+ method = c->FindVirtualMethod(name, sig, pointer_size);
if (method == nullptr) {
// No virtual method matching the signature. Search declared
// private methods and constructors.
- method = c->FindDeclaredDirectMethod(name, sig);
+ method = c->FindDeclaredDirectMethod(name, sig, pointer_size);
}
}
if (method == nullptr || method->IsStatic() != is_static) {
@@ -148,7 +149,7 @@ static jmethodID FindMethodID(ScopedObjectAccess& soa, jclass jni_class,
static mirror::ClassLoader* GetClassLoader(const ScopedObjectAccess& soa)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod* method = soa.Self()->GetCurrentMethod(nullptr);
+ ArtMethod* method = soa.Self()->GetCurrentMethod(nullptr);
// If we are running Runtime.nativeLoad, use the overriding ClassLoader it set.
if (method == soa.DecodeMethod(WellKnownClasses::java_lang_Runtime_nativeLoad)) {
return soa.Decode<mirror::ClassLoader*>(soa.Self()->GetClassLoaderOverride());
@@ -312,26 +313,19 @@ static JavaVMExt* JavaVmExtFromEnv(JNIEnv* env) {
}
template <bool kNative>
-static mirror::ArtMethod* FindMethod(mirror::Class* c,
- const StringPiece& name,
- const StringPiece& sig)
+static ArtMethod* FindMethod(mirror::Class* c, const StringPiece& name, const StringPiece& sig)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- for (size_t i = 0; i < c->NumDirectMethods(); ++i) {
- mirror::ArtMethod* method = c->GetDirectMethod(i);
- if (kNative == method->IsNative() &&
- name == method->GetName() && method->GetSignature() == sig) {
- return method;
+ auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ for (auto& method : c->GetDirectMethods(pointer_size)) {
+ if (kNative == method.IsNative() && name == method.GetName() && method.GetSignature() == sig) {
+ return &method;
}
}
-
- for (size_t i = 0; i < c->NumVirtualMethods(); ++i) {
- mirror::ArtMethod* method = c->GetVirtualMethod(i);
- if (kNative == method->IsNative() &&
- name == method->GetName() && method->GetSignature() == sig) {
- return method;
+ for (auto& method : c->GetVirtualMethods(pointer_size)) {
+ if (kNative == method.IsNative() && name == method.GetName() && method.GetSignature() == sig) {
+ return &method;
}
}
-
return nullptr;
}
@@ -366,7 +360,7 @@ class JNI {
static jmethodID FromReflectedMethod(JNIEnv* env, jobject jlr_method) {
CHECK_NON_NULL_ARGUMENT(jlr_method);
ScopedObjectAccess soa(env);
- return soa.EncodeMethod(mirror::ArtMethod::FromReflectedMethod(soa, jlr_method));
+ return soa.EncodeMethod(ArtMethod::FromReflectedMethod(soa, jlr_method));
}
static jfieldID FromReflectedField(JNIEnv* env, jobject jlr_field) {
@@ -384,8 +378,7 @@ class JNI {
static jobject ToReflectedMethod(JNIEnv* env, jclass, jmethodID mid, jboolean) {
CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
- mirror::ArtMethod* m = soa.DecodeMethod(mid);
- CHECK(!kMovingMethods);
+ ArtMethod* m = soa.DecodeMethod(mid);
mirror::AbstractMethod* method;
if (m->IsConstructor()) {
method = mirror::Constructor::CreateFromArtMethod(soa.Self(), m);
@@ -2151,7 +2144,7 @@ class JNI {
// Note: the right order is to try to find the method locally
// first, either as a direct or a virtual method. Then move to
// the parent.
- mirror::ArtMethod* m = nullptr;
+ ArtMethod* m = nullptr;
bool warn_on_going_to_parent = down_cast<JNIEnvExt*>(env)->vm->IsCheckJniEnabled();
for (mirror::Class* current_class = c;
current_class != nullptr;
@@ -2207,17 +2200,16 @@ class JNI {
VLOG(jni) << "[Unregistering JNI native methods for " << PrettyClass(c) << "]";
size_t unregistered_count = 0;
- for (size_t i = 0; i < c->NumDirectMethods(); ++i) {
- mirror::ArtMethod* m = c->GetDirectMethod(i);
- if (m->IsNative()) {
- m->UnregisterNative();
+ auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ for (auto& m : c->GetDirectMethods(pointer_size)) {
+ if (m.IsNative()) {
+ m.UnregisterNative();
unregistered_count++;
}
}
- for (size_t i = 0; i < c->NumVirtualMethods(); ++i) {
- mirror::ArtMethod* m = c->GetVirtualMethod(i);
- if (m->IsNative()) {
- m->UnregisterNative();
+ for (auto& m : c->GetVirtualMethods(pointer_size)) {
+ if (m.IsNative()) {
+ m.UnregisterNative();
unregistered_count++;
}
}
diff --git a/runtime/jni_internal_test.cc b/runtime/jni_internal_test.cc
index 581ef0e..99eb365 100644
--- a/runtime/jni_internal_test.cc
+++ b/runtime/jni_internal_test.cc
@@ -16,9 +16,9 @@
#include "jni_internal.h"
+#include "art_method-inl.h"
#include "common_compiler_test.h"
#include "java_vm_ext.h"
-#include "mirror/art_method-inl.h"
#include "mirror/string-inl.h"
#include "scoped_thread_state_change.h"
#include "ScopedLocalRef.h"
diff --git a/runtime/linear_alloc.cc b/runtime/linear_alloc.cc
index fe6bee6..43e81d9 100644
--- a/runtime/linear_alloc.cc
+++ b/runtime/linear_alloc.cc
@@ -23,6 +23,11 @@ namespace art {
LinearAlloc::LinearAlloc(ArenaPool* pool) : lock_("linear alloc"), allocator_(pool) {
}
+void* LinearAlloc::Realloc(Thread* self, void* ptr, size_t old_size, size_t new_size) {
+ MutexLock mu(self, lock_);
+ return allocator_.Realloc(ptr, old_size, new_size);
+}
+
void* LinearAlloc::Alloc(Thread* self, size_t size) {
MutexLock mu(self, lock_);
return allocator_.Alloc(size);
@@ -33,4 +38,14 @@ size_t LinearAlloc::GetUsedMemory() const {
return allocator_.BytesUsed();
}
+ArenaPool* LinearAlloc::GetArenaPool() {
+ MutexLock mu(Thread::Current(), lock_);
+ return allocator_.GetArenaPool();
+}
+
+bool LinearAlloc::Contains(void* ptr) const {
+ MutexLock mu(Thread::Current(), lock_);
+ return allocator_.Contains(ptr);
+}
+
} // namespace art
diff --git a/runtime/linear_alloc.h b/runtime/linear_alloc.h
index fcabcc8..c10ddfd 100644
--- a/runtime/linear_alloc.h
+++ b/runtime/linear_alloc.h
@@ -28,7 +28,10 @@ class LinearAlloc {
public:
explicit LinearAlloc(ArenaPool* pool);
- void* Alloc(Thread* self, size_t size);
+ void* Alloc(Thread* self, size_t size) LOCKS_EXCLUDED(lock_);
+
+ // Realloc never frees the input pointer, it is the caller's job to do this if necessary.
+ void* Realloc(Thread* self, void* ptr, size_t old_size, size_t new_size) LOCKS_EXCLUDED(lock_);
// Allocate and construct an array of structs of type T.
template<class T>
@@ -37,7 +40,12 @@ class LinearAlloc {
}
// Return the number of bytes used in the allocator.
- size_t GetUsedMemory() const;
+ size_t GetUsedMemory() const LOCKS_EXCLUDED(lock_);
+
+ ArenaPool* GetArenaPool() LOCKS_EXCLUDED(lock_);
+
+ // Return true if the linear alloc contains an address.
+ bool Contains(void* ptr) const;
private:
mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
diff --git a/runtime/mirror/abstract_method.cc b/runtime/mirror/abstract_method.cc
index 81c656b..91a9870 100644
--- a/runtime/mirror/abstract_method.cc
+++ b/runtime/mirror/abstract_method.cc
@@ -16,14 +16,14 @@
#include "abstract_method.h"
-#include "mirror/art_method-inl.h"
+#include "art_method-inl.h"
namespace art {
namespace mirror {
-bool AbstractMethod::CreateFromArtMethod(mirror::ArtMethod* method) {
- auto* interface_method = method->GetInterfaceMethodIfProxy();
- SetFieldObject<false>(ArtMethodOffset(), method);
+bool AbstractMethod::CreateFromArtMethod(ArtMethod* method) {
+ auto* interface_method = method->GetInterfaceMethodIfProxy(sizeof(void*));
+ SetArtMethod(method);
SetFieldObject<false>(DeclaringClassOffset(), method->GetDeclaringClass());
SetFieldObject<false>(
DeclaringClassOfOverriddenMethodOffset(), interface_method->GetDeclaringClass());
@@ -32,8 +32,12 @@ bool AbstractMethod::CreateFromArtMethod(mirror::ArtMethod* method) {
return true;
}
-mirror::ArtMethod* AbstractMethod::GetArtMethod() {
- return GetFieldObject<mirror::ArtMethod>(ArtMethodOffset());
+ArtMethod* AbstractMethod::GetArtMethod() {
+ return reinterpret_cast<ArtMethod*>(GetField64(ArtMethodOffset()));
+}
+
+void AbstractMethod::SetArtMethod(ArtMethod* method) {
+ SetField64<false>(ArtMethodOffset(), reinterpret_cast<uint64_t>(method));
}
mirror::Class* AbstractMethod::GetDeclaringClass() {
diff --git a/runtime/mirror/abstract_method.h b/runtime/mirror/abstract_method.h
index ef51d7f..99d697a 100644
--- a/runtime/mirror/abstract_method.h
+++ b/runtime/mirror/abstract_method.h
@@ -26,18 +26,19 @@
namespace art {
struct AbstractMethodOffsets;
+class ArtMethod;
namespace mirror {
-class ArtMethod;
-
// C++ mirror of java.lang.reflect.AbstractMethod.
class MANAGED AbstractMethod : public AccessibleObject {
public:
// Called from Constructor::CreateFromArtMethod, Method::CreateFromArtMethod.
- bool CreateFromArtMethod(mirror::ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool CreateFromArtMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::ArtMethod* GetArtMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtMethod* GetArtMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Only used by the image writer.
+ void SetArtMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
mirror::Class* GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
@@ -57,9 +58,10 @@ class MANAGED AbstractMethod : public AccessibleObject {
return MemberOffset(OFFSETOF_MEMBER(AbstractMethod, dex_method_index_));
}
- HeapReference<mirror::ArtMethod> art_method_;
HeapReference<mirror::Class> declaring_class_;
HeapReference<mirror::Class> declaring_class_of_overridden_method_;
+ uint32_t padding_;
+ uint64_t art_method_;
uint32_t access_flags_;
uint32_t dex_method_index_;
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index e93717e..d343292 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -20,19 +20,19 @@
#include "array.h"
#include "base/bit_utils.h"
+#include "base/casts.h"
#include "base/logging.h"
#include "base/stringprintf.h"
-#include "base/casts.h"
-#include "class.h"
+#include "class-inl.h"
#include "gc/heap-inl.h"
#include "thread.h"
namespace art {
namespace mirror {
-inline uint32_t Array::ClassSize() {
+inline uint32_t Array::ClassSize(size_t pointer_size) {
uint32_t vtable_entries = Object::kVTableLength;
- return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0);
+ return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0, pointer_size);
}
template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
@@ -371,6 +371,30 @@ inline void PrimitiveArray<T>::Memcpy(int32_t dst_pos, PrimitiveArray<T>* src, i
}
}
+template<typename T>
+inline T PointerArray::GetElementPtrSize(uint32_t idx, size_t ptr_size) {
+ // C style casts here since we sometimes have T be a pointer, or sometimes an integer
+ // (for stack traces).
+ if (ptr_size == 8) {
+ return (T)static_cast<uintptr_t>(AsLongArray()->GetWithoutChecks(idx));
+ }
+ DCHECK_EQ(ptr_size, 4u);
+ return (T)static_cast<uintptr_t>(AsIntArray()->GetWithoutChecks(idx));
+}
+
+template<bool kTransactionActive, bool kUnchecked, typename T>
+inline void PointerArray::SetElementPtrSize(uint32_t idx, T element, size_t ptr_size) {
+ if (ptr_size == 8) {
+ (kUnchecked ? down_cast<LongArray*>(static_cast<Object*>(this)) : AsLongArray())->
+ SetWithoutChecks<kTransactionActive>(idx, (uint64_t)(element));
+ } else {
+ DCHECK_EQ(ptr_size, 4u);
+ DCHECK_LE((uintptr_t)element, 0xFFFFFFFFu);
+ (kUnchecked ? down_cast<IntArray*>(static_cast<Object*>(this)) : AsIntArray())
+ ->SetWithoutChecks<kTransactionActive>(idx, static_cast<uint32_t>((uintptr_t)element));
+ }
+}
+
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/array.cc b/runtime/mirror/array.cc
index b92f017..d72c03f 100644
--- a/runtime/mirror/array.cc
+++ b/runtime/mirror/array.cc
@@ -125,6 +125,26 @@ void Array::ThrowArrayStoreException(Object* object) {
art::ThrowArrayStoreException(object->GetClass(), this->GetClass());
}
+Array* Array::CopyOf(Thread* self, int32_t new_length) {
+ CHECK(GetClass()->GetComponentType()->IsPrimitive()) << "Will miss write barriers";
+ DCHECK_GE(new_length, 0);
+ // We may get copied by a compacting GC.
+ StackHandleScope<1> hs(self);
+ auto h_this(hs.NewHandle(this));
+ auto* heap = Runtime::Current()->GetHeap();
+ gc::AllocatorType allocator_type = heap->IsMovableObject(this) ? heap->GetCurrentAllocator() :
+ heap->GetCurrentNonMovingAllocator();
+ const auto component_size = GetClass()->GetComponentSize();
+ const auto component_shift = GetClass()->GetComponentSizeShift();
+ Array* new_array = Alloc<true>(self, GetClass(), new_length, component_shift, allocator_type);
+ if (LIKELY(new_array != nullptr)) {
+ memcpy(new_array->GetRawData(component_size, 0), h_this->GetRawData(component_size, 0),
+ std::min(h_this->GetLength(), new_length) << component_shift);
+ }
+ return new_array;
+}
+
+
template <typename T> GcRoot<Class> PrimitiveArray<T>::array_class_;
// Explicitly instantiate all the primitive array types.
diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h
index 832ad68..c4f6c84 100644
--- a/runtime/mirror/array.h
+++ b/runtime/mirror/array.h
@@ -31,7 +31,7 @@ namespace mirror {
class MANAGED Array : public Object {
public:
// The size of a java.lang.Class representing an array.
- static uint32_t ClassSize();
+ static uint32_t ClassSize(size_t pointer_size);
// Allocates an array with the given properties, if kFillUsable is true the array will be of at
// least component_count size, however, if there's usable space at the end of the allocation the
@@ -84,6 +84,8 @@ class MANAGED Array : public Object {
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE bool CheckIsValidIndex(int32_t index) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ Array* CopyOf(Thread* self, int32_t new_length) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
protected:
void ThrowArrayStoreException(Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -174,6 +176,18 @@ class MANAGED PrimitiveArray : public Array {
DISALLOW_IMPLICIT_CONSTRUCTORS(PrimitiveArray);
};
+// Either an IntArray or a LongArray.
+class PointerArray : public Array {
+ public:
+ template<typename T>
+ T GetElementPtrSize(uint32_t idx, size_t ptr_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ template<bool kTransactionActive = false, bool kUnchecked = false, typename T>
+ void SetElementPtrSize(uint32_t idx, T element, size_t ptr_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+};
+
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 5752a15..835b94a 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -20,6 +20,7 @@
#include "class.h"
#include "art_field-inl.h"
+#include "art_method.h"
#include "art_method-inl.h"
#include "class_loader.h"
#include "common_throws.h"
@@ -60,130 +61,157 @@ inline DexCache* Class::GetDexCache() {
return GetFieldObject<DexCache, kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, dex_cache_));
}
-inline ObjectArray<ArtMethod>* Class::GetDirectMethods() {
+inline ArtMethod* Class::GetDirectMethodsPtr() {
DCHECK(IsLoaded() || IsErroneous());
- return GetFieldObject<ObjectArray<ArtMethod>>(OFFSET_OF_OBJECT_MEMBER(Class, direct_methods_));
+ return GetDirectMethodsPtrUnchecked();
}
-inline void Class::SetDirectMethods(ObjectArray<ArtMethod>* new_direct_methods)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(nullptr == GetFieldObject<ObjectArray<ArtMethod>>(
- OFFSET_OF_OBJECT_MEMBER(Class, direct_methods_)));
- DCHECK_NE(0, new_direct_methods->GetLength());
- SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, direct_methods_), new_direct_methods);
+inline ArtMethod* Class::GetDirectMethodsPtrUnchecked() {
+ return reinterpret_cast<ArtMethod*>(GetField64(OFFSET_OF_OBJECT_MEMBER(Class, direct_methods_)));
}
-inline ArtMethod* Class::GetDirectMethod(int32_t i) {
- return GetDirectMethods()->Get(i);
+inline ArtMethod* Class::GetVirtualMethodsPtrUnchecked() {
+ return reinterpret_cast<ArtMethod*>(GetField64(OFFSET_OF_OBJECT_MEMBER(Class, virtual_methods_)));
}
-inline void Class::SetDirectMethod(uint32_t i, ArtMethod* f) // TODO: uint16_t
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- ObjectArray<ArtMethod>* direct_methods =
- GetFieldObject<ObjectArray<ArtMethod>>(OFFSET_OF_OBJECT_MEMBER(Class, direct_methods_));
- direct_methods->Set<false>(i, f);
+inline void Class::SetDirectMethodsPtr(ArtMethod* new_direct_methods) {
+ DCHECK(GetDirectMethodsPtrUnchecked() == nullptr);
+ SetDirectMethodsPtrUnchecked(new_direct_methods);
}
-// Returns the number of static, private, and constructor methods.
-inline uint32_t Class::NumDirectMethods() {
- return (GetDirectMethods() != nullptr) ? GetDirectMethods()->GetLength() : 0;
+inline void Class::SetDirectMethodsPtrUnchecked(ArtMethod* new_direct_methods) {
+ SetField64<false>(OFFSET_OF_OBJECT_MEMBER(Class, direct_methods_),
+ reinterpret_cast<uint64_t>(new_direct_methods));
+}
+
+inline ArtMethod* Class::GetDirectMethodUnchecked(size_t i, size_t pointer_size) {
+ CheckPointerSize(pointer_size);
+ auto* methods = GetDirectMethodsPtrUnchecked();
+ DCHECK(methods != nullptr);
+ return reinterpret_cast<ArtMethod*>(reinterpret_cast<uintptr_t>(methods) +
+ ArtMethod::ObjectSize(pointer_size) * i);
+}
+
+inline ArtMethod* Class::GetDirectMethod(size_t i, size_t pointer_size) {
+ CheckPointerSize(pointer_size);
+ auto* methods = GetDirectMethodsPtr();
+ DCHECK(methods != nullptr);
+ return reinterpret_cast<ArtMethod*>(reinterpret_cast<uintptr_t>(methods) +
+ ArtMethod::ObjectSize(pointer_size) * i);
}
template<VerifyObjectFlags kVerifyFlags>
-inline ObjectArray<ArtMethod>* Class::GetVirtualMethods() {
- DCHECK(IsLoaded() || IsErroneous());
- return GetFieldObject<ObjectArray<ArtMethod>>(OFFSET_OF_OBJECT_MEMBER(Class, virtual_methods_));
+inline ArtMethod* Class::GetVirtualMethodsPtr() {
+ DCHECK(IsLoaded<kVerifyFlags>() || IsErroneous<kVerifyFlags>());
+ return GetVirtualMethodsPtrUnchecked();
}
-inline void Class::SetVirtualMethods(ObjectArray<ArtMethod>* new_virtual_methods) {
+inline void Class::SetVirtualMethodsPtr(ArtMethod* new_virtual_methods) {
// TODO: we reassign virtual methods to grow the table for miranda
// methods.. they should really just be assigned once.
- DCHECK_NE(0, new_virtual_methods->GetLength());
- SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, virtual_methods_), new_virtual_methods);
-}
-
-inline uint32_t Class::NumVirtualMethods() {
- return (GetVirtualMethods() != nullptr) ? GetVirtualMethods()->GetLength() : 0;
+ SetField64<false>(OFFSET_OF_OBJECT_MEMBER(Class, virtual_methods_),
+ reinterpret_cast<uint64_t>(new_virtual_methods));
}
template<VerifyObjectFlags kVerifyFlags>
-inline ArtMethod* Class::GetVirtualMethod(uint32_t i) {
+inline ArtMethod* Class::GetVirtualMethod(size_t i, size_t pointer_size) {
+ CheckPointerSize(pointer_size);
DCHECK(IsResolved<kVerifyFlags>() || IsErroneous<kVerifyFlags>())
<< PrettyClass(this) << " status=" << GetStatus();
- return GetVirtualMethods()->GetWithoutChecks(i);
+ return GetVirtualMethodUnchecked(i, pointer_size);
}
-inline ArtMethod* Class::GetVirtualMethodDuringLinking(uint32_t i) {
+inline ArtMethod* Class::GetVirtualMethodDuringLinking(size_t i, size_t pointer_size) {
+ CheckPointerSize(pointer_size);
DCHECK(IsLoaded() || IsErroneous());
- return GetVirtualMethods()->GetWithoutChecks(i);
+ return GetVirtualMethodUnchecked(i, pointer_size);
}
-inline void Class::SetVirtualMethod(uint32_t i, ArtMethod* f) // TODO: uint16_t
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- ObjectArray<ArtMethod>* virtual_methods =
- GetFieldObject<ObjectArray<ArtMethod>>(OFFSET_OF_OBJECT_MEMBER(Class, virtual_methods_));
- virtual_methods->SetWithoutChecks<false>(i, f);
+inline ArtMethod* Class::GetVirtualMethodUnchecked(size_t i, size_t pointer_size) {
+ CheckPointerSize(pointer_size);
+ auto* methods = GetVirtualMethodsPtrUnchecked();
+ DCHECK(methods != nullptr);
+ return reinterpret_cast<ArtMethod*>(reinterpret_cast<uintptr_t>(methods) +
+ ArtMethod::ObjectSize(pointer_size) * i);
}
-inline ObjectArray<ArtMethod>* Class::GetVTable() {
+inline PointerArray* Class::GetVTable() {
DCHECK(IsResolved() || IsErroneous());
- return GetFieldObject<ObjectArray<ArtMethod>>(OFFSET_OF_OBJECT_MEMBER(Class, vtable_));
+ return GetFieldObject<PointerArray>(OFFSET_OF_OBJECT_MEMBER(Class, vtable_));
}
-inline ObjectArray<ArtMethod>* Class::GetVTableDuringLinking() {
+inline PointerArray* Class::GetVTableDuringLinking() {
DCHECK(IsLoaded() || IsErroneous());
- return GetFieldObject<ObjectArray<ArtMethod>>(OFFSET_OF_OBJECT_MEMBER(Class, vtable_));
+ return GetFieldObject<PointerArray>(OFFSET_OF_OBJECT_MEMBER(Class, vtable_));
}
-inline void Class::SetVTable(ObjectArray<ArtMethod>* new_vtable) {
+inline void Class::SetVTable(PointerArray* new_vtable) {
SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, vtable_), new_vtable);
}
-inline ArtMethod* Class::GetEmbeddedImTableEntry(uint32_t i) {
- uint32_t offset = EmbeddedImTableOffset().Uint32Value() + i * sizeof(ImTableEntry);
- return GetFieldObject<mirror::ArtMethod>(MemberOffset(offset));
+inline MemberOffset Class::EmbeddedImTableEntryOffset(uint32_t i, size_t pointer_size) {
+ DCHECK_LT(i, kImtSize);
+ return MemberOffset(
+ EmbeddedImTableOffset(pointer_size).Uint32Value() + i * ImTableEntrySize(pointer_size));
+}
+
+inline ArtMethod* Class::GetEmbeddedImTableEntry(uint32_t i, size_t pointer_size) {
+ DCHECK(ShouldHaveEmbeddedImtAndVTable());
+ return GetFieldPtrWithSize<ArtMethod*>(
+ EmbeddedImTableEntryOffset(i, pointer_size), pointer_size);
}
-inline void Class::SetEmbeddedImTableEntry(uint32_t i, ArtMethod* method) {
- uint32_t offset = EmbeddedImTableOffset().Uint32Value() + i * sizeof(ImTableEntry);
- SetFieldObject<false>(MemberOffset(offset), method);
+inline void Class::SetEmbeddedImTableEntry(uint32_t i, ArtMethod* method, size_t pointer_size) {
+ DCHECK(ShouldHaveEmbeddedImtAndVTable());
+ SetFieldPtrWithSize<false>(EmbeddedImTableEntryOffset(i, pointer_size), method, pointer_size);
}
inline bool Class::HasVTable() {
- return (GetVTable() != nullptr) || ShouldHaveEmbeddedImtAndVTable();
+ return GetVTable() != nullptr || ShouldHaveEmbeddedImtAndVTable();
}
inline int32_t Class::GetVTableLength() {
if (ShouldHaveEmbeddedImtAndVTable()) {
return GetEmbeddedVTableLength();
}
- return (GetVTable() != nullptr) ? GetVTable()->GetLength() : 0;
+ return GetVTable() != nullptr ? GetVTable()->GetLength() : 0;
}
-inline ArtMethod* Class::GetVTableEntry(uint32_t i) {
+inline ArtMethod* Class::GetVTableEntry(uint32_t i, size_t pointer_size) {
if (ShouldHaveEmbeddedImtAndVTable()) {
- return GetEmbeddedVTableEntry(i);
+ return GetEmbeddedVTableEntry(i, pointer_size);
}
- return (GetVTable() != nullptr) ? GetVTable()->Get(i) : nullptr;
+ auto* vtable = GetVTable();
+ DCHECK(vtable != nullptr);
+ return vtable->GetElementPtrSize<ArtMethod*>(i, pointer_size);
}
inline int32_t Class::GetEmbeddedVTableLength() {
- return GetField32(EmbeddedVTableLengthOffset());
+ return GetField32(MemberOffset(EmbeddedVTableLengthOffset()));
}
inline void Class::SetEmbeddedVTableLength(int32_t len) {
- SetField32<false>(EmbeddedVTableLengthOffset(), len);
+ SetField32<false>(MemberOffset(EmbeddedVTableLengthOffset()), len);
}
-inline ArtMethod* Class::GetEmbeddedVTableEntry(uint32_t i) {
- uint32_t offset = EmbeddedVTableOffset().Uint32Value() + i * sizeof(VTableEntry);
- return GetFieldObject<mirror::ArtMethod>(MemberOffset(offset));
+inline MemberOffset Class::EmbeddedVTableEntryOffset(uint32_t i, size_t pointer_size) {
+ return MemberOffset(
+ EmbeddedVTableOffset(pointer_size).Uint32Value() + i * VTableEntrySize(pointer_size));
}
-inline void Class::SetEmbeddedVTableEntry(uint32_t i, ArtMethod* method) {
- uint32_t offset = EmbeddedVTableOffset().Uint32Value() + i * sizeof(VTableEntry);
- SetFieldObject<false>(MemberOffset(offset), method);
- CHECK(method == GetVTableDuringLinking()->Get(i));
+inline ArtMethod* Class::GetEmbeddedVTableEntry(uint32_t i, size_t pointer_size) {
+ return GetFieldPtrWithSize<ArtMethod*>(EmbeddedVTableEntryOffset(i, pointer_size), pointer_size);
+}
+
+inline void Class::SetEmbeddedVTableEntryUnchecked(
+ uint32_t i, ArtMethod* method, size_t pointer_size) {
+ SetFieldPtrWithSize<false>(EmbeddedVTableEntryOffset(i, pointer_size), method, pointer_size);
+}
+
+inline void Class::SetEmbeddedVTableEntry(uint32_t i, ArtMethod* method, size_t pointer_size) {
+ auto* vtable = GetVTableDuringLinking();
+ CHECK_EQ(method, vtable->GetElementPtrSize<ArtMethod*>(i, pointer_size));
+ SetEmbeddedVTableEntryUnchecked(i, method, pointer_size);
}
inline bool Class::Implements(Class* klass) {
@@ -340,41 +368,43 @@ inline bool Class::IsSubClass(Class* klass) {
return false;
}
-inline ArtMethod* Class::FindVirtualMethodForInterface(ArtMethod* method) {
+inline ArtMethod* Class::FindVirtualMethodForInterface(ArtMethod* method, size_t pointer_size) {
Class* declaring_class = method->GetDeclaringClass();
DCHECK(declaring_class != nullptr) << PrettyClass(this);
DCHECK(declaring_class->IsInterface()) << PrettyMethod(method);
// TODO cache to improve lookup speed
- int32_t iftable_count = GetIfTableCount();
+ const int32_t iftable_count = GetIfTableCount();
IfTable* iftable = GetIfTable();
for (int32_t i = 0; i < iftable_count; i++) {
if (iftable->GetInterface(i) == declaring_class) {
- return iftable->GetMethodArray(i)->Get(method->GetMethodIndex());
+ return iftable->GetMethodArray(i)->GetElementPtrSize<ArtMethod*>(
+ method->GetMethodIndex(), pointer_size);
}
}
return nullptr;
}
-inline ArtMethod* Class::FindVirtualMethodForVirtual(ArtMethod* method) {
+inline ArtMethod* Class::FindVirtualMethodForVirtual(ArtMethod* method, size_t pointer_size) {
DCHECK(!method->GetDeclaringClass()->IsInterface() || method->IsMiranda());
  // The argument method may be from a super class.
// Use the index to a potentially overridden one for this instance's class.
- return GetVTableEntry(method->GetMethodIndex());
+ return GetVTableEntry(method->GetMethodIndex(), pointer_size);
}
-inline ArtMethod* Class::FindVirtualMethodForSuper(ArtMethod* method) {
+inline ArtMethod* Class::FindVirtualMethodForSuper(ArtMethod* method, size_t pointer_size) {
DCHECK(!method->GetDeclaringClass()->IsInterface());
- return GetSuperClass()->GetVTableEntry(method->GetMethodIndex());
+ return GetSuperClass()->GetVTableEntry(method->GetMethodIndex(), pointer_size);
}
-inline ArtMethod* Class::FindVirtualMethodForVirtualOrInterface(ArtMethod* method) {
+inline ArtMethod* Class::FindVirtualMethodForVirtualOrInterface(ArtMethod* method,
+ size_t pointer_size) {
if (method->IsDirect()) {
return method;
}
if (method->GetDeclaringClass()->IsInterface() && !method->IsMiranda()) {
- return FindVirtualMethodForInterface(method);
+ return FindVirtualMethodForInterface(method, pointer_size);
}
- return FindVirtualMethodForVirtual(method);
+ return FindVirtualMethodForVirtual(method, pointer_size);
}
inline IfTable* Class::GetIfTable() {
@@ -406,24 +436,24 @@ inline MemberOffset Class::GetFirstReferenceInstanceFieldOffset() {
: ClassOffset();
}
-inline MemberOffset Class::GetFirstReferenceStaticFieldOffset() {
+inline MemberOffset Class::GetFirstReferenceStaticFieldOffset(size_t pointer_size) {
DCHECK(IsResolved());
uint32_t base = sizeof(mirror::Class); // Static fields come after the class.
if (ShouldHaveEmbeddedImtAndVTable()) {
// Static fields come after the embedded tables.
- base = mirror::Class::ComputeClassSize(true, GetEmbeddedVTableLength(),
- 0, 0, 0, 0, 0);
+ base = mirror::Class::ComputeClassSize(
+ true, GetEmbeddedVTableLength(), 0, 0, 0, 0, 0, pointer_size);
}
return MemberOffset(base);
}
-inline MemberOffset Class::GetFirstReferenceStaticFieldOffsetDuringLinking() {
+inline MemberOffset Class::GetFirstReferenceStaticFieldOffsetDuringLinking(size_t pointer_size) {
DCHECK(IsLoaded());
uint32_t base = sizeof(mirror::Class); // Static fields come after the class.
if (ShouldHaveEmbeddedImtAndVTable()) {
// Static fields come after the embedded tables.
base = mirror::Class::ComputeClassSize(true, GetVTableDuringLinking()->GetLength(),
- 0, 0, 0, 0, 0);
+ 0, 0, 0, 0, 0, pointer_size);
}
return MemberOffset(base);
}
@@ -499,14 +529,12 @@ inline uint32_t Class::GetAccessFlags() {
// circularity issue during loading the names of its members
DCHECK(IsIdxLoaded<kVerifyFlags>() || IsRetired<kVerifyFlags>() ||
IsErroneous<static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>() ||
- this == String::GetJavaLangString() ||
- this == ArtMethod::GetJavaLangReflectArtMethod())
+ this == String::GetJavaLangString())
<< "IsIdxLoaded=" << IsIdxLoaded<kVerifyFlags>()
<< " IsRetired=" << IsRetired<kVerifyFlags>()
<< " IsErroneous=" <<
IsErroneous<static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>()
<< " IsString=" << (this == String::GetJavaLangString())
- << " IsArtMethod=" << (this == ArtMethod::GetJavaLangReflectArtMethod())
<< " descriptor=" << PrettyDescriptor(this);
return GetField32<kVerifyFlags>(AccessFlagsOffset());
}
@@ -594,20 +622,20 @@ inline uint32_t Class::ComputeClassSize(bool has_embedded_tables,
uint32_t num_16bit_static_fields,
uint32_t num_32bit_static_fields,
uint32_t num_64bit_static_fields,
- uint32_t num_ref_static_fields) {
+ uint32_t num_ref_static_fields,
+ size_t pointer_size) {
// Space used by java.lang.Class and its instance fields.
uint32_t size = sizeof(Class);
// Space used by embedded tables.
if (has_embedded_tables) {
- uint32_t embedded_imt_size = kImtSize * sizeof(ImTableEntry);
- uint32_t embedded_vtable_size = num_vtable_entries * sizeof(VTableEntry);
- size += embedded_imt_size +
- sizeof(int32_t) /* vtable len */ +
- embedded_vtable_size;
+ const uint32_t embedded_imt_size = kImtSize * ImTableEntrySize(pointer_size);
+ const uint32_t embedded_vtable_size = num_vtable_entries * VTableEntrySize(pointer_size);
+ size = RoundUp(size + sizeof(uint32_t) /* embedded vtable len */, pointer_size) +
+ embedded_imt_size + embedded_vtable_size;
}
// Space used by reference statics.
- size += num_ref_static_fields * sizeof(HeapReference<Object>);
+ size += num_ref_static_fields * sizeof(HeapReference<Object>);
if (!IsAligned<8>(size) && num_64bit_static_fields > 0) {
uint32_t gap = 8 - (size & 0x7);
size += gap; // will be padded
@@ -629,10 +657,8 @@ inline uint32_t Class::ComputeClassSize(bool has_embedded_tables,
}
// Guaranteed to be at least 4 byte aligned. No need for further alignments.
// Space used for primitive static fields.
- size += (num_8bit_static_fields * sizeof(uint8_t)) +
- (num_16bit_static_fields * sizeof(uint16_t)) +
- (num_32bit_static_fields * sizeof(uint32_t)) +
- (num_64bit_static_fields * sizeof(uint64_t));
+ size += num_8bit_static_fields * sizeof(uint8_t) + num_16bit_static_fields * sizeof(uint16_t) +
+ num_32bit_static_fields * sizeof(uint32_t) + num_64bit_static_fields * sizeof(uint64_t);
return size;
}
@@ -651,40 +677,10 @@ inline void Class::VisitReferences(mirror::Class* klass, const Visitor& visitor)
// allocated with the right size for those. Also, unresolved classes don't have fields
// linked yet.
VisitStaticFieldsReferences<kVisitClass>(this, visitor);
- if (ShouldHaveEmbeddedImtAndVTable()) {
- VisitEmbeddedImtAndVTable(visitor);
- }
- }
-}
-
-template<typename Visitor>
-inline void Class::VisitEmbeddedImtAndVTable(const Visitor& visitor) {
- uint32_t pos = sizeof(mirror::Class);
-
- size_t count = kImtSize;
- for (size_t i = 0; i < count; ++i) {
- MemberOffset offset = MemberOffset(pos);
- visitor(this, offset, true);
- pos += sizeof(ImTableEntry);
- }
-
- // Skip vtable length.
- pos += sizeof(int32_t);
-
- count = GetEmbeddedVTableLength();
- for (size_t i = 0; i < count; ++i) {
- MemberOffset offset = MemberOffset(pos);
- visitor(this, offset, true);
- pos += sizeof(VTableEntry);
}
}
template<ReadBarrierOption kReadBarrierOption>
-inline bool Class::IsArtMethodClass() const {
- return this == ArtMethod::GetJavaLangReflectArtMethod<kReadBarrierOption>();
-}
-
-template<ReadBarrierOption kReadBarrierOption>
inline bool Class::IsReferenceClass() const {
return this == Reference::GetJavaLangRefReference<kReadBarrierOption>();
}
@@ -812,27 +808,92 @@ inline ObjectArray<String>* Class::GetDexCacheStrings() {
}
template<class Visitor>
-void mirror::Class::VisitFieldRoots(Visitor& visitor) {
+void mirror::Class::VisitNativeRoots(Visitor& visitor, size_t pointer_size) {
ArtField* const sfields = GetSFieldsUnchecked();
// Since we visit class roots while we may be writing these fields, check against null.
- // TODO: Is this safe for concurrent compaction?
if (sfields != nullptr) {
for (size_t i = 0, count = NumStaticFields(); i < count; ++i) {
+ auto* f = &sfields[i];
if (kIsDebugBuild && IsResolved()) {
- CHECK_EQ(sfields[i].GetDeclaringClass(), this) << GetStatus();
+ CHECK_EQ(f->GetDeclaringClass(), this) << GetStatus();
}
- visitor.VisitRoot(sfields[i].DeclaringClassRoot().AddressWithoutBarrier());
+ f->VisitRoots(visitor);
}
}
ArtField* const ifields = GetIFieldsUnchecked();
if (ifields != nullptr) {
for (size_t i = 0, count = NumInstanceFields(); i < count; ++i) {
+ auto* f = &ifields[i];
if (kIsDebugBuild && IsResolved()) {
- CHECK_EQ(ifields[i].GetDeclaringClass(), this) << GetStatus();
+ CHECK_EQ(f->GetDeclaringClass(), this) << GetStatus();
}
- visitor.VisitRoot(ifields[i].DeclaringClassRoot().AddressWithoutBarrier());
+ f->VisitRoots(visitor);
}
}
+ for (auto& m : GetDirectMethods(pointer_size)) {
+ m.VisitRoots(visitor);
+ }
+ for (auto& m : GetVirtualMethods(pointer_size)) {
+ m.VisitRoots(visitor);
+ }
+}
+
+inline StrideIterator<ArtMethod> Class::DirectMethodsBegin(size_t pointer_size) {
+ CheckPointerSize(pointer_size);
+ auto* methods = GetDirectMethodsPtrUnchecked();
+ auto stride = ArtMethod::ObjectSize(pointer_size);
+ return StrideIterator<ArtMethod>(reinterpret_cast<uintptr_t>(methods), stride);
+}
+
+inline StrideIterator<ArtMethod> Class::DirectMethodsEnd(size_t pointer_size) {
+ CheckPointerSize(pointer_size);
+ auto* methods = GetDirectMethodsPtrUnchecked();
+ auto stride = ArtMethod::ObjectSize(pointer_size);
+ auto count = NumDirectMethods();
+ return StrideIterator<ArtMethod>(reinterpret_cast<uintptr_t>(methods) + stride * count, stride);
+}
+
+inline IterationRange<StrideIterator<ArtMethod>> Class::GetDirectMethods(size_t pointer_size) {
+ CheckPointerSize(pointer_size);
+ return MakeIterationRange(DirectMethodsBegin(pointer_size), DirectMethodsEnd(pointer_size));
+}
+
+inline StrideIterator<ArtMethod> Class::VirtualMethodsBegin(size_t pointer_size) {
+ CheckPointerSize(pointer_size);
+ auto* methods = GetVirtualMethodsPtrUnchecked();
+ auto stride = ArtMethod::ObjectSize(pointer_size);
+ return StrideIterator<ArtMethod>(reinterpret_cast<uintptr_t>(methods), stride);
+}
+
+inline StrideIterator<ArtMethod> Class::VirtualMethodsEnd(size_t pointer_size) {
+ CheckPointerSize(pointer_size);
+ auto* methods = GetVirtualMethodsPtrUnchecked();
+ auto stride = ArtMethod::ObjectSize(pointer_size);
+ auto count = NumVirtualMethods();
+ return StrideIterator<ArtMethod>(reinterpret_cast<uintptr_t>(methods) + stride * count, stride);
+}
+
+inline IterationRange<StrideIterator<ArtMethod>> Class::GetVirtualMethods(size_t pointer_size) {
+ return MakeIterationRange(VirtualMethodsBegin(pointer_size), VirtualMethodsEnd(pointer_size));
+}
+
+inline MemberOffset Class::EmbeddedImTableOffset(size_t pointer_size) {
+ CheckPointerSize(pointer_size);
+  // Round up since we want the embedded imt and vtable to be pointer size aligned in the
+  // 64-bit case. Add 32 bits for the embedded vtable length.
+ return MemberOffset(
+ RoundUp(EmbeddedVTableLengthOffset().Uint32Value() + sizeof(uint32_t), pointer_size));
+}
+
+inline MemberOffset Class::EmbeddedVTableOffset(size_t pointer_size) {
+ CheckPointerSize(pointer_size);
+ return MemberOffset(EmbeddedImTableOffset(pointer_size).Uint32Value() +
+ kImtSize * ImTableEntrySize(pointer_size));
+}
+
+inline void Class::CheckPointerSize(size_t pointer_size) {
+ DCHECK(ValidPointerSize(pointer_size)) << pointer_size;
+ DCHECK_EQ(pointer_size, Runtime::Current()->GetClassLinker()->GetImagePointerSize());
}
} // namespace mirror
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 56c586a..f0b7bfd 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -145,9 +145,10 @@ void Class::SetDexCache(DexCache* new_dex_cache) {
}
void Class::SetClassSize(uint32_t new_class_size) {
- if (kIsDebugBuild && (new_class_size < GetClassSize())) {
- DumpClass(LOG(ERROR), kDumpClassFullDetail);
- CHECK_GE(new_class_size, GetClassSize()) << " class=" << PrettyTypeOf(this);
+ if (kIsDebugBuild && new_class_size < GetClassSize()) {
+ DumpClass(LOG(INTERNAL_FATAL), kDumpClassFullDetail);
+ LOG(INTERNAL_FATAL) << new_class_size << " vs " << GetClassSize();
+ LOG(FATAL) << " class=" << PrettyTypeOf(this);
}
// Not called within a transaction.
SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, class_size_), new_class_size);
@@ -205,10 +206,11 @@ void Class::DumpClass(std::ostream& os, int flags) {
return;
}
- Thread* self = Thread::Current();
+ Thread* const self = Thread::Current();
StackHandleScope<2> hs(self);
Handle<mirror::Class> h_this(hs.NewHandle(this));
Handle<mirror::Class> h_super(hs.NewHandle(GetSuperClass()));
+ auto image_pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
std::string temp;
os << "----- " << (IsInterface() ? "interface" : "class") << " "
@@ -244,12 +246,13 @@ void Class::DumpClass(std::ostream& os, int flags) {
os << " vtable (" << h_this->NumVirtualMethods() << " entries, "
<< (h_super.Get() != nullptr ? h_super->NumVirtualMethods() : 0) << " in super):\n";
for (size_t i = 0; i < NumVirtualMethods(); ++i) {
- os << StringPrintf(" %2zd: %s\n", i,
- PrettyMethod(h_this->GetVirtualMethodDuringLinking(i)).c_str());
+ os << StringPrintf(" %2zd: %s\n", i, PrettyMethod(
+ h_this->GetVirtualMethodDuringLinking(i, image_pointer_size)).c_str());
}
os << " direct methods (" << h_this->NumDirectMethods() << " entries):\n";
for (size_t i = 0; i < h_this->NumDirectMethods(); ++i) {
- os << StringPrintf(" %2zd: %s\n", i, PrettyMethod(h_this->GetDirectMethod(i)).c_str());
+ os << StringPrintf(" %2zd: %s\n", i, PrettyMethod(
+ h_this->GetDirectMethod(i, image_pointer_size)).c_str());
}
if (h_this->NumStaticFields() > 0) {
os << " static fields (" << h_this->NumStaticFields() << " entries):\n";
@@ -275,7 +278,7 @@ void Class::DumpClass(std::ostream& os, int flags) {
}
void Class::SetReferenceInstanceOffsets(uint32_t new_reference_offsets) {
- if (kIsDebugBuild && (new_reference_offsets != kClassWalkSuper)) {
+ if (kIsDebugBuild && new_reference_offsets != kClassWalkSuper) {
// Sanity check that the number of bits set in the reference offset bitmap
// agrees with the number of references
uint32_t count = 0;
@@ -342,9 +345,10 @@ void Class::SetClassLoader(ClassLoader* new_class_loader) {
}
}
-ArtMethod* Class::FindInterfaceMethod(const StringPiece& name, const StringPiece& signature) {
+ArtMethod* Class::FindInterfaceMethod(const StringPiece& name, const StringPiece& signature,
+ size_t pointer_size) {
// Check the current class before checking the interfaces.
- ArtMethod* method = FindDeclaredVirtualMethod(name, signature);
+ ArtMethod* method = FindDeclaredVirtualMethod(name, signature, pointer_size);
if (method != nullptr) {
return method;
}
@@ -352,7 +356,7 @@ ArtMethod* Class::FindInterfaceMethod(const StringPiece& name, const StringPiece
int32_t iftable_count = GetIfTableCount();
IfTable* iftable = GetIfTable();
for (int32_t i = 0; i < iftable_count; ++i) {
- method = iftable->GetInterface(i)->FindDeclaredVirtualMethod(name, signature);
+ method = iftable->GetInterface(i)->FindDeclaredVirtualMethod(name, signature, pointer_size);
if (method != nullptr) {
return method;
}
@@ -360,9 +364,10 @@ ArtMethod* Class::FindInterfaceMethod(const StringPiece& name, const StringPiece
return nullptr;
}
-ArtMethod* Class::FindInterfaceMethod(const StringPiece& name, const Signature& signature) {
+ArtMethod* Class::FindInterfaceMethod(const StringPiece& name, const Signature& signature,
+ size_t pointer_size) {
// Check the current class before checking the interfaces.
- ArtMethod* method = FindDeclaredVirtualMethod(name, signature);
+ ArtMethod* method = FindDeclaredVirtualMethod(name, signature, pointer_size);
if (method != nullptr) {
return method;
}
@@ -370,7 +375,7 @@ ArtMethod* Class::FindInterfaceMethod(const StringPiece& name, const Signature&
int32_t iftable_count = GetIfTableCount();
IfTable* iftable = GetIfTable();
for (int32_t i = 0; i < iftable_count; ++i) {
- method = iftable->GetInterface(i)->FindDeclaredVirtualMethod(name, signature);
+ method = iftable->GetInterface(i)->FindDeclaredVirtualMethod(name, signature, pointer_size);
if (method != nullptr) {
return method;
}
@@ -378,9 +383,10 @@ ArtMethod* Class::FindInterfaceMethod(const StringPiece& name, const Signature&
return nullptr;
}
-ArtMethod* Class::FindInterfaceMethod(const DexCache* dex_cache, uint32_t dex_method_idx) {
+ArtMethod* Class::FindInterfaceMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
+ size_t pointer_size) {
// Check the current class before checking the interfaces.
- ArtMethod* method = FindDeclaredVirtualMethod(dex_cache, dex_method_idx);
+ ArtMethod* method = FindDeclaredVirtualMethod(dex_cache, dex_method_idx, pointer_size);
if (method != nullptr) {
return method;
}
@@ -388,7 +394,8 @@ ArtMethod* Class::FindInterfaceMethod(const DexCache* dex_cache, uint32_t dex_me
int32_t iftable_count = GetIfTableCount();
IfTable* iftable = GetIfTable();
for (int32_t i = 0; i < iftable_count; ++i) {
- method = iftable->GetInterface(i)->FindDeclaredVirtualMethod(dex_cache, dex_method_idx);
+ method = iftable->GetInterface(i)->FindDeclaredVirtualMethod(
+ dex_cache, dex_method_idx, pointer_size);
if (method != nullptr) {
return method;
}
@@ -396,41 +403,42 @@ ArtMethod* Class::FindInterfaceMethod(const DexCache* dex_cache, uint32_t dex_me
return nullptr;
}
-ArtMethod* Class::FindDeclaredDirectMethod(const StringPiece& name, const StringPiece& signature) {
- for (size_t i = 0; i < NumDirectMethods(); ++i) {
- ArtMethod* method = GetDirectMethod(i);
- if (name == method->GetName() && method->GetSignature() == signature) {
- return method;
+ArtMethod* Class::FindDeclaredDirectMethod(const StringPiece& name, const StringPiece& signature,
+ size_t pointer_size) {
+ for (auto& method : GetDirectMethods(pointer_size)) {
+ if (name == method.GetName() && method.GetSignature() == signature) {
+ return &method;
}
}
return nullptr;
}
-ArtMethod* Class::FindDeclaredDirectMethod(const StringPiece& name, const Signature& signature) {
- for (size_t i = 0; i < NumDirectMethods(); ++i) {
- ArtMethod* method = GetDirectMethod(i);
- if (name == method->GetName() && signature == method->GetSignature()) {
- return method;
+ArtMethod* Class::FindDeclaredDirectMethod(const StringPiece& name, const Signature& signature,
+ size_t pointer_size) {
+ for (auto& method : GetDirectMethods(pointer_size)) {
+ if (name == method.GetName() && signature == method.GetSignature()) {
+ return &method;
}
}
return nullptr;
}
-ArtMethod* Class::FindDeclaredDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx) {
+ArtMethod* Class::FindDeclaredDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
+ size_t pointer_size) {
if (GetDexCache() == dex_cache) {
- for (size_t i = 0; i < NumDirectMethods(); ++i) {
- ArtMethod* method = GetDirectMethod(i);
- if (method->GetDexMethodIndex() == dex_method_idx) {
- return method;
+ for (auto& method : GetDirectMethods(pointer_size)) {
+ if (method.GetDexMethodIndex() == dex_method_idx) {
+ return &method;
}
}
}
return nullptr;
}
-ArtMethod* Class::FindDirectMethod(const StringPiece& name, const StringPiece& signature) {
+ArtMethod* Class::FindDirectMethod(const StringPiece& name, const StringPiece& signature,
+ size_t pointer_size) {
for (Class* klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
- ArtMethod* method = klass->FindDeclaredDirectMethod(name, signature);
+ ArtMethod* method = klass->FindDeclaredDirectMethod(name, signature, pointer_size);
if (method != nullptr) {
return method;
}
@@ -438,9 +446,10 @@ ArtMethod* Class::FindDirectMethod(const StringPiece& name, const StringPiece& s
return nullptr;
}
-ArtMethod* Class::FindDirectMethod(const StringPiece& name, const Signature& signature) {
+ArtMethod* Class::FindDirectMethod(const StringPiece& name, const Signature& signature,
+ size_t pointer_size) {
for (Class* klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
- ArtMethod* method = klass->FindDeclaredDirectMethod(name, signature);
+ ArtMethod* method = klass->FindDeclaredDirectMethod(name, signature, pointer_size);
if (method != nullptr) {
return method;
}
@@ -448,9 +457,10 @@ ArtMethod* Class::FindDirectMethod(const StringPiece& name, const Signature& sig
return nullptr;
}
-ArtMethod* Class::FindDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx) {
+ArtMethod* Class::FindDirectMethod(
+ const DexCache* dex_cache, uint32_t dex_method_idx, size_t pointer_size) {
for (Class* klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
- ArtMethod* method = klass->FindDeclaredDirectMethod(dex_cache, dex_method_idx);
+ ArtMethod* method = klass->FindDeclaredDirectMethod(dex_cache, dex_method_idx, pointer_size);
if (method != nullptr) {
return method;
}
@@ -458,44 +468,44 @@ ArtMethod* Class::FindDirectMethod(const DexCache* dex_cache, uint32_t dex_metho
return nullptr;
}
-ArtMethod* Class::FindDeclaredVirtualMethod(const StringPiece& name, const StringPiece& signature) {
- for (size_t i = 0; i < NumVirtualMethods(); ++i) {
- ArtMethod* method = GetVirtualMethod(i);
- if (name == method->GetName() && method->GetSignature() == signature) {
- return method;
+ArtMethod* Class::FindDeclaredVirtualMethod(const StringPiece& name, const StringPiece& signature,
+ size_t pointer_size) {
+ for (auto& method : GetVirtualMethods(pointer_size)) {
+ if (name == method.GetName() && method.GetSignature() == signature) {
+ return &method;
}
}
return nullptr;
}
-ArtMethod* Class::FindDeclaredVirtualMethod(const StringPiece& name, const Signature& signature) {
- for (size_t i = 0; i < NumVirtualMethods(); ++i) {
- ArtMethod* method = GetVirtualMethod(i);
- if (name == method->GetName() && signature == method->GetSignature()) {
- return method;
+ArtMethod* Class::FindDeclaredVirtualMethod(const StringPiece& name, const Signature& signature,
+ size_t pointer_size) {
+ for (auto& method : GetVirtualMethods(pointer_size)) {
+ if (name == method.GetName() && signature == method.GetSignature()) {
+ return &method;
}
}
return nullptr;
}
-ArtMethod* Class::FindDeclaredVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx) {
+ArtMethod* Class::FindDeclaredVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
+ size_t pointer_size) {
if (GetDexCache() == dex_cache) {
- for (size_t i = 0; i < NumVirtualMethods(); ++i) {
- ArtMethod* method = GetVirtualMethod(i);
- if (method->GetDexMethodIndex() == dex_method_idx &&
- // A miranda method may have a different DexCache and is always created by linking,
- // never *declared* in the class.
- !method->IsMiranda()) {
- return method;
+ for (auto& method : GetVirtualMethods(pointer_size)) {
+ // A miranda method may have a different DexCache and is always created by linking,
+ // never *declared* in the class.
+ if (method.GetDexMethodIndex() == dex_method_idx && !method.IsMiranda()) {
+ return &method;
}
}
}
return nullptr;
}
-ArtMethod* Class::FindVirtualMethod(const StringPiece& name, const StringPiece& signature) {
+ArtMethod* Class::FindVirtualMethod(
+ const StringPiece& name, const StringPiece& signature, size_t pointer_size) {
for (Class* klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
- ArtMethod* method = klass->FindDeclaredVirtualMethod(name, signature);
+ ArtMethod* method = klass->FindDeclaredVirtualMethod(name, signature, pointer_size);
if (method != nullptr) {
return method;
}
@@ -503,9 +513,10 @@ ArtMethod* Class::FindVirtualMethod(const StringPiece& name, const StringPiece&
return nullptr;
}
-ArtMethod* Class::FindVirtualMethod(const StringPiece& name, const Signature& signature) {
+ArtMethod* Class::FindVirtualMethod(
+ const StringPiece& name, const Signature& signature, size_t pointer_size) {
for (Class* klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
- ArtMethod* method = klass->FindDeclaredVirtualMethod(name, signature);
+ ArtMethod* method = klass->FindDeclaredVirtualMethod(name, signature, pointer_size);
if (method != nullptr) {
return method;
}
@@ -513,9 +524,10 @@ ArtMethod* Class::FindVirtualMethod(const StringPiece& name, const Signature& si
return nullptr;
}
-ArtMethod* Class::FindVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx) {
+ArtMethod* Class::FindVirtualMethod(
+ const DexCache* dex_cache, uint32_t dex_method_idx, size_t pointer_size) {
for (Class* klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
- ArtMethod* method = klass->FindDeclaredVirtualMethod(dex_cache, dex_method_idx);
+ ArtMethod* method = klass->FindDeclaredVirtualMethod(dex_cache, dex_method_idx, pointer_size);
if (method != nullptr) {
return method;
}
@@ -523,13 +535,12 @@ ArtMethod* Class::FindVirtualMethod(const DexCache* dex_cache, uint32_t dex_meth
return nullptr;
}
-ArtMethod* Class::FindClassInitializer() {
- for (size_t i = 0; i < NumDirectMethods(); ++i) {
- ArtMethod* method = GetDirectMethod(i);
- if (method->IsClassInitializer()) {
- DCHECK_STREQ(method->GetName(), "<clinit>");
- DCHECK_STREQ(method->GetSignature().ToString().c_str(), "()V");
- return method;
+ArtMethod* Class::FindClassInitializer(size_t pointer_size) {
+ for (ArtMethod& method : GetDirectMethods(pointer_size)) {
+ if (method.IsClassInitializer()) {
+ DCHECK_STREQ(method.GetName(), "<clinit>");
+ DCHECK_STREQ(method.GetSignature().ToString().c_str(), "()V");
+ return &method;
}
}
return nullptr;
@@ -684,23 +695,18 @@ ArtField* Class::FindField(Thread* self, Handle<Class> klass, const StringPiece&
return nullptr;
}
-static void SetPreverifiedFlagOnMethods(mirror::ObjectArray<mirror::ArtMethod>* methods)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (methods != nullptr) {
- for (int32_t index = 0, end = methods->GetLength(); index < end; ++index) {
- mirror::ArtMethod* method = methods->GetWithoutChecks(index);
- DCHECK(method != nullptr);
- if (!method->IsNative() && !method->IsAbstract()) {
- method->SetPreverified();
- }
+void Class::SetPreverifiedFlagOnAllMethods(size_t pointer_size) {
+ DCHECK(IsVerified());
+ for (auto& m : GetDirectMethods(pointer_size)) {
+ if (!m.IsNative() && !m.IsAbstract()) {
+ m.SetPreverified();
+ }
+ }
+ for (auto& m : GetVirtualMethods(pointer_size)) {
+ if (!m.IsNative() && !m.IsAbstract()) {
+ m.SetPreverified();
}
}
-}
-
-void Class::SetPreverifiedFlagOnAllMethods() {
- DCHECK(IsVerified());
- SetPreverifiedFlagOnMethods(GetDirectMethods());
- SetPreverifiedFlagOnMethods(GetVirtualMethods());
}
const char* Class::GetDescriptor(std::string* storage) {
@@ -795,21 +801,20 @@ const DexFile::TypeList* Class::GetInterfaceTypeList() {
return GetDexFile().GetInterfacesList(*class_def);
}
-void Class::PopulateEmbeddedImtAndVTable(StackHandleScope<kImtSize>* imt_handle_scope) {
- for (uint32_t i = 0; i < kImtSize; i++) {
- // Replace null with conflict.
- mirror::Object* obj = imt_handle_scope->GetReference(i);
- DCHECK(obj != nullptr);
- SetEmbeddedImTableEntry(i, obj->AsArtMethod());
+void Class::PopulateEmbeddedImtAndVTable(ArtMethod* const (&methods)[kImtSize],
+ size_t pointer_size) {
+ for (size_t i = 0; i < kImtSize; i++) {
+ auto method = methods[i];
+ DCHECK(method != nullptr);
+ SetEmbeddedImTableEntry(i, method, pointer_size);
}
-
- ObjectArray<ArtMethod>* table = GetVTableDuringLinking();
+ PointerArray* table = GetVTableDuringLinking();
CHECK(table != nullptr) << PrettyClass(this);
- SetEmbeddedVTableLength(table->GetLength());
- for (int32_t i = 0; i < table->GetLength(); i++) {
- SetEmbeddedVTableEntry(i, table->GetWithoutChecks(i));
+ const size_t table_length = table->GetLength();
+ SetEmbeddedVTableLength(table_length);
+ for (size_t i = 0; i < table_length; i++) {
+ SetEmbeddedVTableEntry(i, table->GetElementPtrSize<ArtMethod*>(i, pointer_size), pointer_size);
}
-
   // Keep java.lang.Object class's vtable around since it's easier
   // to be reused by array classes during their linking.
if (!IsObjectClass()) {
@@ -820,21 +825,20 @@ void Class::PopulateEmbeddedImtAndVTable(StackHandleScope<kImtSize>* imt_handle_
// The pre-fence visitor for Class::CopyOf().
class CopyClassVisitor {
public:
- explicit CopyClassVisitor(Thread* self, Handle<mirror::Class>* orig,
- size_t new_length, size_t copy_bytes,
- StackHandleScope<mirror::Class::kImtSize>* imt_handle_scope)
+ explicit CopyClassVisitor(Thread* self, Handle<mirror::Class>* orig, size_t new_length,
+ size_t copy_bytes, ArtMethod* const (&imt)[mirror::Class::kImtSize],
+ size_t pointer_size)
: self_(self), orig_(orig), new_length_(new_length),
- copy_bytes_(copy_bytes), imt_handle_scope_(imt_handle_scope) {
+ copy_bytes_(copy_bytes), imt_(imt), pointer_size_(pointer_size) {
}
- void operator()(Object* obj, size_t usable_size) const
+ void operator()(mirror::Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- UNUSED(usable_size);
StackHandleScope<1> hs(self_);
Handle<mirror::Class> h_new_class_obj(hs.NewHandle(obj->AsClass()));
mirror::Object::CopyObject(self_, h_new_class_obj.Get(), orig_->Get(), copy_bytes_);
mirror::Class::SetStatus(h_new_class_obj, Class::kStatusResolving, self_);
- h_new_class_obj->PopulateEmbeddedImtAndVTable(imt_handle_scope_);
+ h_new_class_obj->PopulateEmbeddedImtAndVTable(imt_, pointer_size_);
h_new_class_obj->SetClassSize(new_length_);
}
@@ -843,12 +847,13 @@ class CopyClassVisitor {
Handle<mirror::Class>* const orig_;
const size_t new_length_;
const size_t copy_bytes_;
- StackHandleScope<mirror::Class::kImtSize>* const imt_handle_scope_;
+ ArtMethod* const (&imt_)[mirror::Class::kImtSize];
+ const size_t pointer_size_;
DISALLOW_COPY_AND_ASSIGN(CopyClassVisitor);
};
Class* Class::CopyOf(Thread* self, int32_t new_length,
- StackHandleScope<kImtSize>* imt_handle_scope) {
+ ArtMethod* const (&imt)[mirror::Class::kImtSize], size_t pointer_size) {
DCHECK_GE(new_length, static_cast<int32_t>(sizeof(Class)));
// We may get copied by a compacting GC.
StackHandleScope<1> hs(self);
@@ -856,13 +861,12 @@ Class* Class::CopyOf(Thread* self, int32_t new_length,
gc::Heap* heap = Runtime::Current()->GetHeap();
// The num_bytes (3rd param) is sizeof(Class) as opposed to SizeOf()
// to skip copying the tail part that we will overwrite here.
- CopyClassVisitor visitor(self, &h_this, new_length, sizeof(Class), imt_handle_scope);
- mirror::Object* new_class =
- kMovingClasses
- ? heap->AllocObject<true>(self, java_lang_Class_.Read(), new_length, visitor)
- : heap->AllocNonMovableObject<true>(self, java_lang_Class_.Read(), new_length, visitor);
+ CopyClassVisitor visitor(self, &h_this, new_length, sizeof(Class), imt, pointer_size);
+ mirror::Object* new_class = kMovingClasses ?
+ heap->AllocObject<true>(self, java_lang_Class_.Read(), new_length, visitor) :
+ heap->AllocNonMovableObject<true>(self, java_lang_Class_.Read(), new_length, visitor);
if (UNLIKELY(new_class == nullptr)) {
- CHECK(self->IsExceptionPending()); // Expect an OOME.
+ self->AssertPendingOOMException();
return nullptr;
}
return new_class->AsClass();
@@ -873,26 +877,32 @@ bool Class::ProxyDescriptorEquals(const char* match) {
return Runtime::Current()->GetClassLinker()->GetDescriptorForProxy(this) == match;
}
-mirror::ArtMethod* Class::GetDeclaredConstructor(
+// TODO: Move this to java_lang_Class.cc?
+ArtMethod* Class::GetDeclaredConstructor(
Thread* self, Handle<mirror::ObjectArray<mirror::Class>> args) {
- auto* direct_methods = GetDirectMethods();
- size_t count = direct_methods != nullptr ? direct_methods->GetLength() : 0u;
- for (size_t i = 0; i < count; ++i) {
- auto* m = direct_methods->GetWithoutChecks(i);
+ for (auto& m : GetDirectMethods(sizeof(void*))) {
// Skip <clinit> which is a static constructor, as well as non constructors.
- if (m->IsStatic() || !m->IsConstructor()) {
+ if (m.IsStatic() || !m.IsConstructor()) {
continue;
}
// May cause thread suspension and exceptions.
- if (m->EqualParameters(args)) {
- return m;
+ if (m.GetInterfaceMethodIfProxy(sizeof(void*))->EqualParameters(args)) {
+ return &m;
}
- if (self->IsExceptionPending()) {
+ if (UNLIKELY(self->IsExceptionPending())) {
return nullptr;
}
}
return nullptr;
}
+uint32_t Class::Depth() {
+ uint32_t depth = 0;
+ for (Class* klass = this; klass->GetSuperClass() != nullptr; klass = klass->GetSuperClass()) {
+ depth++;
+ }
+ return depth;
+}
+
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index b99fc68..ba8a693 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -17,6 +17,7 @@
#ifndef ART_RUNTIME_MIRROR_CLASS_H_
#define ART_RUNTIME_MIRROR_CLASS_H_
+#include "base/iteration_range.h"
#include "dex_file.h"
#include "gc_root.h"
#include "gc/allocator_type.h"
@@ -27,6 +28,8 @@
#include "object_callbacks.h"
#include "primitive.h"
#include "read_barrier_option.h"
+#include "stride_iterator.h"
+#include "utils.h"
#ifndef IMT_SIZE
#error IMT_SIZE not defined
@@ -35,6 +38,7 @@
namespace art {
class ArtField;
+class ArtMethod;
struct ClassOffsets;
template<class T> class Handle;
template<class T> class Handle;
@@ -44,7 +48,6 @@ template<size_t kNumReferences> class PACKED(4) StackHandleScope;
namespace mirror {
-class ArtMethod;
class ClassLoader;
class Constructor;
class DexCache;
@@ -64,16 +67,6 @@ class MANAGED Class FINAL : public Object {
// (non-marker) interfaces.
static constexpr size_t kImtSize = IMT_SIZE;
- // imtable entry embedded in class object.
- struct MANAGED ImTableEntry {
- HeapReference<ArtMethod> method;
- };
-
- // vtable entry embedded in class object.
- struct MANAGED VTableEntry {
- HeapReference<ArtMethod> method;
- };
-
// Class Status
//
// kStatusRetired: Class that's temporarily used till class linking time
@@ -406,13 +399,7 @@ class MANAGED Class FINAL : public Object {
}
// Depth of class from java.lang.Object
- uint32_t Depth() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- uint32_t depth = 0;
- for (Class* klass = this; klass->GetSuperClass() != nullptr; klass = klass->GetSuperClass()) {
- depth++;
- }
- return depth;
- }
+ uint32_t Depth() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
@@ -427,9 +414,6 @@ class MANAGED Class FINAL : public Object {
bool IsThrowableClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool IsArtMethodClass() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
bool IsReferenceClass() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static MemberOffset ComponentTypeOffset() {
@@ -469,12 +453,27 @@ class MANAGED Class FINAL : public Object {
bool IsInstantiable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return (!IsPrimitive() && !IsInterface() && !IsAbstract()) ||
- ((IsAbstract()) && IsArrayClass());
+ (IsAbstract() && IsArrayClass());
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsObjectArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetComponentType<kVerifyFlags>() != nullptr && !GetComponentType<kVerifyFlags>()->IsPrimitive();
+ return GetComponentType<kVerifyFlags>() != nullptr &&
+ !GetComponentType<kVerifyFlags>()->IsPrimitive();
+ }
+
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ bool IsIntArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
+ auto* component_type = GetComponentType<kVerifyFlags>();
+ return component_type != nullptr && component_type->template IsPrimitiveInt<kNewFlags>();
+ }
+
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ bool IsLongArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
+ auto* component_type = GetComponentType<kVerifyFlags>();
+ return component_type != nullptr && component_type->template IsPrimitiveLong<kNewFlags>();
}
// Creates a raw object instance but does not invoke the default constructor.
@@ -517,18 +516,19 @@ class MANAGED Class FINAL : public Object {
uint32_t num_16bit_static_fields,
uint32_t num_32bit_static_fields,
uint32_t num_64bit_static_fields,
- uint32_t num_ref_static_fields);
+ uint32_t num_ref_static_fields,
+ size_t pointer_size);
// The size of java.lang.Class.class.
- static uint32_t ClassClassSize() {
+ static uint32_t ClassClassSize(size_t pointer_size) {
// The number of vtable entries in java.lang.Class.
- uint32_t vtable_entries = Object::kVTableLength + 66;
- return ComputeClassSize(true, vtable_entries, 0, 0, 0, 1, 0);
+ uint32_t vtable_entries = Object::kVTableLength + 65;
+ return ComputeClassSize(true, vtable_entries, 0, 0, 0, 1, 0, pointer_size);
}
// The size of a java.lang.Class representing a primitive such as int.class.
- static uint32_t PrimitiveClassSize() {
- return ComputeClassSize(false, 0, 0, 0, 0, 0, 0);
+ static uint32_t PrimitiveClassSize(size_t pointer_size) {
+ return ComputeClassSize(false, 0, 0, 0, 0, 0, 0, pointer_size);
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
@@ -673,60 +673,82 @@ class MANAGED Class FINAL : public Object {
// Also updates the dex_cache_strings_ variable from new_dex_cache.
void SetDexCache(DexCache* new_dex_cache) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ALWAYS_INLINE ObjectArray<ArtMethod>* GetDirectMethods()
+ ALWAYS_INLINE StrideIterator<ArtMethod> DirectMethodsBegin(size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ ALWAYS_INLINE StrideIterator<ArtMethod> DirectMethodsEnd(size_t pointer_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SetDirectMethods(ObjectArray<ArtMethod>* new_direct_methods)
+ ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetDirectMethods(size_t pointer_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ALWAYS_INLINE ArtMethod* GetDirectMethod(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtMethod* GetDirectMethodsPtr() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ void SetDirectMethodsPtr(ArtMethod* new_direct_methods)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Used by image writer.
+ void SetDirectMethodsPtrUnchecked(ArtMethod* new_direct_methods)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SetDirectMethod(uint32_t i, ArtMethod* f) // TODO: uint16_t
+ ALWAYS_INLINE ArtMethod* GetDirectMethod(size_t i, size_t pointer_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Use only when we are allocating and populating the method arrays.
+ ALWAYS_INLINE ArtMethod* GetDirectMethodUnchecked(size_t i, size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE ArtMethod* GetVirtualMethodUnchecked(size_t i, size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
// Returns the number of static, private, and constructor methods.
- uint32_t NumDirectMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE uint32_t NumDirectMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_direct_methods_));
+ }
+ void SetNumDirectMethods(uint32_t num) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, num_direct_methods_), num);
+ }
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE ObjectArray<ArtMethod>* GetVirtualMethods()
+ ALWAYS_INLINE ArtMethod* GetVirtualMethodsPtr() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ ALWAYS_INLINE StrideIterator<ArtMethod> VirtualMethodsBegin(size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ ALWAYS_INLINE StrideIterator<ArtMethod> VirtualMethodsEnd(size_t pointer_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ALWAYS_INLINE void SetVirtualMethods(ObjectArray<ArtMethod>* new_virtual_methods)
+ ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetVirtualMethods(size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ void SetVirtualMethodsPtr(ArtMethod* new_virtual_methods)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Returns the number of non-inherited virtual methods.
- ALWAYS_INLINE uint32_t NumVirtualMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE uint32_t NumVirtualMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_virtual_methods_));
+ }
+ void SetNumVirtualMethods(uint32_t num) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, num_virtual_methods_), num);
+ }
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ArtMethod* GetVirtualMethod(uint32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- ArtMethod* GetVirtualMethodDuringLinking(uint32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtMethod* GetVirtualMethod(size_t i, size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SetVirtualMethod(uint32_t i, ArtMethod* f) // TODO: uint16_t
+ ArtMethod* GetVirtualMethodDuringLinking(size_t i, size_t pointer_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ALWAYS_INLINE ObjectArray<ArtMethod>* GetVTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE PointerArray* GetVTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ALWAYS_INLINE ObjectArray<ArtMethod>* GetVTableDuringLinking()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE PointerArray* GetVTableDuringLinking() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SetVTable(ObjectArray<ArtMethod>* new_vtable)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetVTable(PointerArray* new_vtable) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static MemberOffset VTableOffset() {
return OFFSET_OF_OBJECT_MEMBER(Class, vtable_);
}
- static MemberOffset EmbeddedImTableOffset() {
- return MemberOffset(sizeof(Class));
- }
-
static MemberOffset EmbeddedVTableLengthOffset() {
- return MemberOffset(sizeof(Class) + kImtSize * sizeof(mirror::Class::ImTableEntry));
- }
-
- static MemberOffset EmbeddedVTableOffset() {
- return MemberOffset(sizeof(Class) + kImtSize * sizeof(ImTableEntry) + sizeof(int32_t));
+ return MemberOffset(sizeof(Class));
}
bool ShouldHaveEmbeddedImtAndVTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -735,90 +757,117 @@ class MANAGED Class FINAL : public Object {
bool HasVTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ArtMethod* GetEmbeddedImTableEntry(uint32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static MemberOffset EmbeddedImTableEntryOffset(uint32_t i, size_t pointer_size);
- void SetEmbeddedImTableEntry(uint32_t i, ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static MemberOffset EmbeddedVTableEntryOffset(uint32_t i, size_t pointer_size);
+
+ ArtMethod* GetEmbeddedImTableEntry(uint32_t i, size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ void SetEmbeddedImTableEntry(uint32_t i, ArtMethod* method, size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
int32_t GetVTableLength() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ArtMethod* GetVTableEntry(uint32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtMethod* GetVTableEntry(uint32_t i, size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
int32_t GetEmbeddedVTableLength() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void SetEmbeddedVTableLength(int32_t len) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ArtMethod* GetEmbeddedVTableEntry(uint32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtMethod* GetEmbeddedVTableEntry(uint32_t i, size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SetEmbeddedVTableEntry(uint32_t i, ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetEmbeddedVTableEntry(uint32_t i, ArtMethod* method, size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void PopulateEmbeddedImtAndVTable(StackHandleScope<kImtSize>* imt_handle_scope)
+ inline void SetEmbeddedVTableEntryUnchecked(uint32_t i, ArtMethod* method, size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ void PopulateEmbeddedImtAndVTable(ArtMethod* const (&methods)[kImtSize], size_t pointer_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Given a method implemented by this class but potentially from a super class, return the
// specific implementation method for this class.
- ArtMethod* FindVirtualMethodForVirtual(ArtMethod* method)
+ ArtMethod* FindVirtualMethodForVirtual(ArtMethod* method, size_t pointer_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Given a method implemented by this class' super class, return the specific implementation
// method for this class.
- ArtMethod* FindVirtualMethodForSuper(ArtMethod* method)
+ ArtMethod* FindVirtualMethodForSuper(ArtMethod* method, size_t pointer_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Given a method implemented by this class, but potentially from a
// super class or interface, return the specific implementation
// method for this class.
- ArtMethod* FindVirtualMethodForInterface(ArtMethod* method)
+ ArtMethod* FindVirtualMethodForInterface(ArtMethod* method, size_t pointer_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE;
- ArtMethod* FindVirtualMethodForVirtualOrInterface(ArtMethod* method)
+ ArtMethod* FindVirtualMethodForVirtualOrInterface(ArtMethod* method, size_t pointer_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ArtMethod* FindInterfaceMethod(const StringPiece& name, const StringPiece& signature)
+ ArtMethod* FindInterfaceMethod(const StringPiece& name, const StringPiece& signature,
+ size_t pointer_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ArtMethod* FindInterfaceMethod(const StringPiece& name, const Signature& signature)
+ ArtMethod* FindInterfaceMethod(const StringPiece& name, const Signature& signature,
+ size_t pointer_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ArtMethod* FindInterfaceMethod(const DexCache* dex_cache, uint32_t dex_method_idx)
+ ArtMethod* FindInterfaceMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
+ size_t pointer_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ArtMethod* FindDeclaredDirectMethod(const StringPiece& name, const StringPiece& signature)
+ ArtMethod* FindDeclaredDirectMethod(const StringPiece& name, const StringPiece& signature,
+ size_t pointer_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ArtMethod* FindDeclaredDirectMethod(const StringPiece& name, const Signature& signature)
+ ArtMethod* FindDeclaredDirectMethod(const StringPiece& name, const Signature& signature,
+ size_t pointer_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ArtMethod* FindDeclaredDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx)
+ ArtMethod* FindDeclaredDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
+ size_t pointer_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ArtMethod* FindDirectMethod(const StringPiece& name, const StringPiece& signature)
+ ArtMethod* FindDirectMethod(const StringPiece& name, const StringPiece& signature,
+ size_t pointer_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ArtMethod* FindDirectMethod(const StringPiece& name, const Signature& signature)
+ ArtMethod* FindDirectMethod(const StringPiece& name, const Signature& signature,
+ size_t pointer_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ArtMethod* FindDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx)
+ ArtMethod* FindDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
+ size_t pointer_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ArtMethod* FindDeclaredVirtualMethod(const StringPiece& name, const StringPiece& signature)
+ ArtMethod* FindDeclaredVirtualMethod(const StringPiece& name, const StringPiece& signature,
+ size_t pointer_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ArtMethod* FindDeclaredVirtualMethod(const StringPiece& name, const Signature& signature)
+ ArtMethod* FindDeclaredVirtualMethod(const StringPiece& name, const Signature& signature,
+ size_t pointer_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ArtMethod* FindDeclaredVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx)
+ ArtMethod* FindDeclaredVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
+ size_t pointer_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ArtMethod* FindVirtualMethod(const StringPiece& name, const StringPiece& signature)
+ ArtMethod* FindVirtualMethod(const StringPiece& name, const StringPiece& signature,
+ size_t pointer_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ArtMethod* FindVirtualMethod(const StringPiece& name, const Signature& signature)
+ ArtMethod* FindVirtualMethod(const StringPiece& name, const Signature& signature,
+ size_t pointer_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ArtMethod* FindVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx)
+ ArtMethod* FindVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
+ size_t pointer_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ArtMethod* FindClassInitializer() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtMethod* FindClassInitializer(size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
ALWAYS_INLINE int32_t GetIfTableCount() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -867,7 +916,8 @@ class MANAGED Class FINAL : public Object {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Get the offset of the first reference instance field. Other reference instance fields follow.
- MemberOffset GetFirstReferenceInstanceFieldOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ MemberOffset GetFirstReferenceInstanceFieldOffset()
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Returns the number of static fields containing reference types.
uint32_t NumReferenceStaticFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -886,10 +936,11 @@ class MANAGED Class FINAL : public Object {
}
// Get the offset of the first reference static field. Other reference static fields follow.
- MemberOffset GetFirstReferenceStaticFieldOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ MemberOffset GetFirstReferenceStaticFieldOffset(size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Get the offset of the first reference static field. Other reference static fields follow.
- MemberOffset GetFirstReferenceStaticFieldOffsetDuringLinking()
+ MemberOffset GetFirstReferenceStaticFieldOffsetDuringLinking(size_t pointer_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Gets the static fields of the class.
@@ -989,22 +1040,20 @@ class MANAGED Class FINAL : public Object {
static void VisitRoots(RootVisitor* visitor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Visit native roots visits roots which are keyed off the native pointers such as ArtFields and
+ // ArtMethods.
template<class Visitor>
- // Visit field roots.
- void VisitFieldRoots(Visitor& visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void VisitNativeRoots(Visitor& visitor, size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// When class is verified, set the kAccPreverified flag on each method.
- void SetPreverifiedFlagOnAllMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetPreverifiedFlagOnAllMethods(size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template <bool kVisitClass, typename Visitor>
void VisitReferences(mirror::Class* klass, const Visitor& visitor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Visit references within the embedded tables of the class.
- // TODO: remove NO_THREAD_SAFETY_ANALYSIS when annotalysis handles visitors better.
- template<typename Visitor>
- void VisitEmbeddedImtAndVTable(const Visitor& visitor) NO_THREAD_SAFETY_ANALYSIS;
-
// Get the descriptor of the class. In a few cases a std::string is required, rather than
// always create one the storage argument is populated and its internal c_str() returned. We do
// this to avoid memory allocation in the common case.
@@ -1014,7 +1063,6 @@ class MANAGED Class FINAL : public Object {
bool DescriptorEquals(const char* match) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
const DexFile::ClassDef* GetClassDef() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
ALWAYS_INLINE uint32_t NumDirectInterfaces() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -1037,8 +1085,8 @@ class MANAGED Class FINAL : public Object {
void AssertInitializedOrInitializingInThread(Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- Class* CopyOf(Thread* self, int32_t new_length, StackHandleScope<kImtSize>* imt_handle_scope)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ Class* CopyOf(Thread* self, int32_t new_length, ArtMethod* const (&imt)[mirror::Class::kImtSize],
+ size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// For proxy class only.
ObjectArray<Class>* GetInterfaces() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -1060,7 +1108,7 @@ class MANAGED Class FINAL : public Object {
}
// May cause thread suspension due to EqualParameters.
- mirror::ArtMethod* GetDeclaredConstructor(
+ ArtMethod* GetDeclaredConstructor(
Thread* self, Handle<mirror::ObjectArray<mirror::Class>> args)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -1085,6 +1133,20 @@ class MANAGED Class FINAL : public Object {
return GetClassLoader() == nullptr;
}
+ static size_t ImTableEntrySize(size_t pointer_size) {
+ return pointer_size;
+ }
+
+ static size_t VTableEntrySize(size_t pointer_size) {
+ return pointer_size;
+ }
+
+ ALWAYS_INLINE ArtMethod* GetDirectMethodsPtrUnchecked()
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ ALWAYS_INLINE ArtMethod* GetVirtualMethodsPtrUnchecked()
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
private:
void SetVerifyErrorClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -1109,6 +1171,12 @@ class MANAGED Class FINAL : public Object {
bool ProxyDescriptorEquals(const char* match) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Check that the pointer size matches the one in the class linker.
+ ALWAYS_INLINE static void CheckPointerSize(size_t pointer_size);
+
+ static MemberOffset EmbeddedImTableOffset(size_t pointer_size);
+ static MemberOffset EmbeddedVTableOffset(size_t pointer_size);
+
// Defining class loader, or null for the "bootstrap" system loader.
HeapReference<ClassLoader> class_loader_;
@@ -1123,9 +1191,6 @@ class MANAGED Class FINAL : public Object {
// Short cuts to dex_cache_ member for fast compiled code access.
HeapReference<ObjectArray<String>> dex_cache_strings_;
- // static, private, and <init> methods
- HeapReference<ObjectArray<ArtMethod>> direct_methods_;
-
// The interface table (iftable_) contains pairs of a interface class and an array of the
// interface methods. There is one pair per interface supported by this class. That means one
// pair for each interface we support directly, indirectly via superclass, or indirectly via a
@@ -1148,19 +1213,19 @@ class MANAGED Class FINAL : public Object {
// If class verify fails, we must return same error on subsequent tries.
HeapReference<Class> verify_error_class_;
- // Virtual methods defined in this class; invoked through vtable.
- HeapReference<ObjectArray<ArtMethod>> virtual_methods_;
-
// Virtual method table (vtable), for use by "invoke-virtual". The vtable from the superclass is
// copied in, and virtual methods from our class either replace those from the super or are
// appended. For abstract classes, methods may be created in the vtable that aren't in
// virtual_ methods_ for miranda methods.
- HeapReference<ObjectArray<ArtMethod>> vtable_;
+ HeapReference<PointerArray> vtable_;
// Access flags; low 16 bits are defined by VM spec.
// Note: Shuffled back.
uint32_t access_flags_;
+ // static, private, and <init> methods. Pointer to an ArtMethod array.
+ uint64_t direct_methods_;
+
// instance fields
//
// These describe the layout of the contents of an Object.
@@ -1174,6 +1239,9 @@ class MANAGED Class FINAL : public Object {
// Static fields
uint64_t sfields_;
+ // Virtual methods defined in this class; invoked through vtable. Pointer to an ArtMethod array.
+ uint64_t virtual_methods_;
+
// Total size of the Class instance; used when allocating storage on gc heap.
// See also object_size_.
uint32_t class_size_;
@@ -1189,7 +1257,10 @@ class MANAGED Class FINAL : public Object {
// TODO: really 16bits
int32_t dex_type_idx_;
- // Number of static fields.
+ // Number of direct methods.
+ uint32_t num_direct_methods_;
+
+ // Number of instance fields.
uint32_t num_instance_fields_;
// Number of instance fields that are object refs.
@@ -1201,6 +1272,9 @@ class MANAGED Class FINAL : public Object {
// Number of static fields.
uint32_t num_static_fields_;
+ // Number of virtual methods.
+ uint32_t num_virtual_methods_;
+
// Total object size; used when allocating storage on gc heap.
// (For interfaces and abstract classes this will be zero.)
// See also class_size_.
diff --git a/runtime/mirror/dex_cache-inl.h b/runtime/mirror/dex_cache-inl.h
index 1cb437e..4b5063a 100644
--- a/runtime/mirror/dex_cache-inl.h
+++ b/runtime/mirror/dex_cache-inl.h
@@ -20,6 +20,7 @@
#include "dex_cache.h"
#include "art_field-inl.h"
+#include "art_method-inl.h"
#include "base/logging.h"
#include "mirror/class.h"
#include "runtime.h"
@@ -27,20 +28,9 @@
namespace art {
namespace mirror {
-inline uint32_t DexCache::ClassSize() {
+inline uint32_t DexCache::ClassSize(size_t pointer_size) {
uint32_t vtable_entries = Object::kVTableLength + 5;
- return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0);
-}
-
-inline ArtMethod* DexCache::GetResolvedMethod(uint32_t method_idx)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- ArtMethod* method = GetResolvedMethods()->Get(method_idx);
- // Hide resolution trampoline methods from the caller
- if (method != nullptr && method->IsRuntimeMethod()) {
- DCHECK_EQ(method, Runtime::Current()->GetResolutionMethod());
- return nullptr;
- }
- return method;
+ return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0, pointer_size);
}
inline void DexCache::SetResolvedType(uint32_t type_idx, Class* resolved) {
@@ -50,15 +40,8 @@ inline void DexCache::SetResolvedType(uint32_t type_idx, Class* resolved) {
}
inline ArtField* DexCache::GetResolvedField(uint32_t idx, size_t ptr_size) {
- ArtField* field = nullptr;
- if (ptr_size == 8) {
- field = reinterpret_cast<ArtField*>(
- static_cast<uintptr_t>(GetResolvedFields()->AsLongArray()->GetWithoutChecks(idx)));
- } else {
- DCHECK_EQ(ptr_size, 4u);
- field = reinterpret_cast<ArtField*>(
- static_cast<uintptr_t>(GetResolvedFields()->AsIntArray()->GetWithoutChecks(idx)));
- }
+ DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), ptr_size);
+ auto* field = GetResolvedFields()->GetElementPtrSize<ArtField*>(idx, ptr_size);
if (field == nullptr || field->GetDeclaringClass()->IsErroneous()) {
return nullptr;
}
@@ -66,15 +49,24 @@ inline ArtField* DexCache::GetResolvedField(uint32_t idx, size_t ptr_size) {
}
inline void DexCache::SetResolvedField(uint32_t idx, ArtField* field, size_t ptr_size) {
- if (ptr_size == 8) {
- GetResolvedFields()->AsLongArray()->Set(
- idx, static_cast<uint64_t>(reinterpret_cast<uintptr_t>(field)));
- } else {
- DCHECK_EQ(ptr_size, 4u);
- CHECK_LE(reinterpret_cast<uintptr_t>(field), 0xFFFFFFFF);
- GetResolvedFields()->AsIntArray()->Set(
- idx, static_cast<uint32_t>(reinterpret_cast<uintptr_t>(field)));
+ DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), ptr_size);
+ GetResolvedFields()->SetElementPtrSize(idx, field, ptr_size);
+}
+
+inline ArtMethod* DexCache::GetResolvedMethod(uint32_t method_idx, size_t ptr_size) {
+ DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), ptr_size);
+ auto* method = GetResolvedMethods()->GetElementPtrSize<ArtMethod*>(method_idx, ptr_size);
+ // Hide resolution trampoline methods from the caller
+ if (method != nullptr && method->IsRuntimeMethod()) {
+ DCHECK_EQ(method, Runtime::Current()->GetResolutionMethod());
+ return nullptr;
}
+ return method;
+}
+
+inline void DexCache::SetResolvedMethod(uint32_t idx, ArtMethod* method, size_t ptr_size) {
+ DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), ptr_size);
+ GetResolvedMethods()->SetElementPtrSize(idx, method, ptr_size);
}
} // namespace mirror
diff --git a/runtime/mirror/dex_cache.cc b/runtime/mirror/dex_cache.cc
index ade8bd2..630faee 100644
--- a/runtime/mirror/dex_cache.cc
+++ b/runtime/mirror/dex_cache.cc
@@ -31,12 +31,9 @@
namespace art {
namespace mirror {
-void DexCache::Init(const DexFile* dex_file,
- String* location,
- ObjectArray<String>* strings,
- ObjectArray<Class>* resolved_types,
- ObjectArray<ArtMethod>* resolved_methods,
- Array* resolved_fields) {
+void DexCache::Init(const DexFile* dex_file, String* location, ObjectArray<String>* strings,
+ ObjectArray<Class>* resolved_types, PointerArray* resolved_methods,
+ PointerArray* resolved_fields, size_t pointer_size) {
CHECK(dex_file != nullptr);
CHECK(location != nullptr);
CHECK(strings != nullptr);
@@ -51,24 +48,21 @@ void DexCache::Init(const DexFile* dex_file,
SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_types_), resolved_types);
SetFieldObject<false>(ResolvedMethodsOffset(), resolved_methods);
- Runtime* runtime = Runtime::Current();
+ Runtime* const runtime = Runtime::Current();
if (runtime->HasResolutionMethod()) {
// Initialize the resolve methods array to contain trampolines for resolution.
- ArtMethod* trampoline = runtime->GetResolutionMethod();
- for (size_t i = 0, length = resolved_methods->GetLength(); i < length; i++) {
- resolved_methods->SetWithoutChecks<false>(i, trampoline);
- }
+ Fixup(runtime->GetResolutionMethod(), pointer_size);
}
}
-void DexCache::Fixup(ArtMethod* trampoline) {
+void DexCache::Fixup(ArtMethod* trampoline, size_t pointer_size) {
// Fixup the resolve methods array to contain trampoline for resolution.
CHECK(trampoline != nullptr);
- ObjectArray<ArtMethod>* resolved_methods = GetResolvedMethods();
- size_t length = resolved_methods->GetLength();
- for (size_t i = 0; i < length; i++) {
- if (resolved_methods->GetWithoutChecks(i) == nullptr) {
- resolved_methods->SetWithoutChecks<false>(i, trampoline);
+ CHECK(trampoline->IsRuntimeMethod());
+ auto* resolved_methods = GetResolvedMethods();
+ for (size_t i = 0, length = resolved_methods->GetLength(); i < length; i++) {
+ if (resolved_methods->GetElementPtrSize<ArtMethod*>(i, pointer_size) == nullptr) {
+ resolved_methods->SetElementPtrSize(i, trampoline, pointer_size);
}
}
}
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index 7e30b89..0ce83ec 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -17,6 +17,7 @@
#ifndef ART_RUNTIME_MIRROR_DEX_CACHE_H_
#define ART_RUNTIME_MIRROR_DEX_CACHE_H_
+#include "array.h"
#include "art_field.h"
#include "art_method.h"
#include "class.h"
@@ -38,22 +39,19 @@ class String;
class MANAGED DexCache FINAL : public Object {
public:
// Size of java.lang.DexCache.class.
- static uint32_t ClassSize();
+ static uint32_t ClassSize(size_t pointer_size);
// Size of an instance of java.lang.DexCache not including referenced values.
static constexpr uint32_t InstanceSize() {
return sizeof(DexCache);
}
- void Init(const DexFile* dex_file,
- String* location,
- ObjectArray<String>* strings,
- ObjectArray<Class>* types,
- ObjectArray<ArtMethod>* methods,
- Array* fields)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void Init(const DexFile* dex_file, String* location, ObjectArray<String>* strings,
+ ObjectArray<Class>* types, PointerArray* methods, PointerArray* fields,
+ size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void Fixup(ArtMethod* trampoline) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void Fixup(ArtMethod* trampoline, size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
String* GetLocation() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(DexCache, location_));
@@ -109,19 +107,18 @@ class MANAGED DexCache FINAL : public Object {
void SetResolvedType(uint32_t type_idx, Class* resolved)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ArtMethod* GetResolvedMethod(uint32_t method_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE ArtMethod* GetResolvedMethod(uint32_t method_idx, size_t ptr_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SetResolvedMethod(uint32_t method_idx, ArtMethod* resolved) ALWAYS_INLINE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- GetResolvedMethods()->Set(method_idx, resolved);
- }
+ ALWAYS_INLINE void SetResolvedMethod(uint32_t method_idx, ArtMethod* resolved, size_t ptr_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Pointer sized variant, used for patching.
- ArtField* GetResolvedField(uint32_t idx, size_t ptr_size)
+ ALWAYS_INLINE ArtField* GetResolvedField(uint32_t idx, size_t ptr_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Pointer sized variant, used for patching.
- void SetResolvedField(uint32_t idx, ArtField* field, size_t ptr_size)
+ ALWAYS_INLINE void SetResolvedField(uint32_t idx, ArtField* field, size_t ptr_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
ObjectArray<String>* GetStrings() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -133,13 +130,12 @@ class MANAGED DexCache FINAL : public Object {
OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_types_));
}
- ObjectArray<ArtMethod>* GetResolvedMethods() ALWAYS_INLINE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetFieldObject< ObjectArray<ArtMethod>>(ResolvedMethodsOffset());
+ PointerArray* GetResolvedMethods() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return GetFieldObject<PointerArray>(ResolvedMethodsOffset());
}
- Array* GetResolvedFields() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetFieldObject<Array>(ResolvedFieldsOffset());
+ PointerArray* GetResolvedFields() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return GetFieldObject<PointerArray>(ResolvedFieldsOffset());
}
const DexFile* GetDexFile() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -154,9 +150,9 @@ class MANAGED DexCache FINAL : public Object {
private:
HeapReference<Object> dex_;
HeapReference<String> location_;
- // Either an int array or long array (64 bit).
- HeapReference<Object> resolved_fields_;
- HeapReference<ObjectArray<ArtMethod>> resolved_methods_;
+ // Either an int array or long array based on runtime ISA since these arrays hold pointers.
+ HeapReference<PointerArray> resolved_fields_;
+ HeapReference<PointerArray> resolved_methods_;
HeapReference<ObjectArray<Class>> resolved_types_;
HeapReference<ObjectArray<String>> strings_;
uint64_t dex_file_;
diff --git a/runtime/mirror/field-inl.h b/runtime/mirror/field-inl.h
index 388921b..8a0daec 100644
--- a/runtime/mirror/field-inl.h
+++ b/runtime/mirror/field-inl.h
@@ -50,14 +50,14 @@ inline mirror::Field* Field::CreateFromArtField(Thread* self, ArtField* field,
}
}
auto ret = hs.NewHandle(static_cast<Field*>(StaticClass()->AllocObject(self)));
- if (ret.Get() == nullptr) {
- if (kIsDebugBuild) {
- self->AssertPendingException();
- }
+ if (UNLIKELY(ret.Get() == nullptr)) {
+ self->AssertPendingOOMException();
return nullptr;
}
+ const auto pointer_size = kTransactionActive ?
+ Runtime::Current()->GetClassLinker()->GetImagePointerSize() : sizeof(void*);
auto dex_field_index = field->GetDexFieldIndex();
- auto* resolved_field = field->GetDexCache()->GetResolvedField(dex_field_index, sizeof(void*));
+ auto* resolved_field = field->GetDexCache()->GetResolvedField(dex_field_index, pointer_size);
if (field->GetDeclaringClass()->IsProxyClass()) {
DCHECK(field->IsStatic());
DCHECK_LT(dex_field_index, 2U);
@@ -70,7 +70,7 @@ inline mirror::Field* Field::CreateFromArtField(Thread* self, ArtField* field,
} else {
// We rely on the field being resolved so that we can back to the ArtField
// (i.e. FromReflectedMethod).
- field->GetDexCache()->SetResolvedField(dex_field_index, field, sizeof(void*));
+ field->GetDexCache()->SetResolvedField(dex_field_index, field, pointer_size);
}
}
ret->SetType<kTransactionActive>(type.Get());
diff --git a/runtime/mirror/field.cc b/runtime/mirror/field.cc
index ac56129..02e4484 100644
--- a/runtime/mirror/field.cc
+++ b/runtime/mirror/field.cc
@@ -16,6 +16,7 @@
#include "field-inl.h"
+#include "class-inl.h"
#include "dex_cache-inl.h"
#include "object_array-inl.h"
#include "object-inl.h"
diff --git a/runtime/mirror/iftable.h b/runtime/mirror/iftable.h
index 1c1c7b3..1ea5bee 100644
--- a/runtime/mirror/iftable.h
+++ b/runtime/mirror/iftable.h
@@ -34,27 +34,22 @@ class MANAGED IfTable FINAL : public ObjectArray<Object> {
ALWAYS_INLINE void SetInterface(int32_t i, Class* interface)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ObjectArray<ArtMethod>* GetMethodArray(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- ObjectArray<ArtMethod>* method_array =
- down_cast<ObjectArray<ArtMethod>*>(Get((i * kMax) + kMethodArray));
+ PointerArray* GetMethodArray(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ auto* method_array = down_cast<PointerArray*>(Get((i * kMax) + kMethodArray));
DCHECK(method_array != nullptr);
return method_array;
}
size_t GetMethodArrayCount(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- ObjectArray<ArtMethod>* method_array =
- down_cast<ObjectArray<ArtMethod>*>(Get((i * kMax) + kMethodArray));
- if (method_array == nullptr) {
- return 0;
- }
- return method_array->GetLength();
+ auto* method_array = down_cast<PointerArray*>(Get((i * kMax) + kMethodArray));
+ return method_array == nullptr ? 0u : method_array->GetLength();
}
- void SetMethodArray(int32_t i, ObjectArray<ArtMethod>* new_ma)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(new_ma != nullptr);
- DCHECK(Get((i * kMax) + kMethodArray) == nullptr);
- Set<false>((i * kMax) + kMethodArray, new_ma);
+ void SetMethodArray(int32_t i, PointerArray* arr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ DCHECK(arr != nullptr);
+ auto idx = i * kMax + kMethodArray;
+ DCHECK(Get(idx) == nullptr);
+ Set<false>(idx, arr);
}
size_t Count() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
diff --git a/runtime/mirror/method.cc b/runtime/mirror/method.cc
index 81530bb..85c52e9 100644
--- a/runtime/mirror/method.cc
+++ b/runtime/mirror/method.cc
@@ -16,7 +16,9 @@
#include "method.h"
-#include "mirror/art_method.h"
+#include "art_method.h"
+#include "gc_root-inl.h"
+#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
namespace art {
@@ -49,7 +51,7 @@ void Method::ResetArrayClass() {
array_class_ = GcRoot<Class>(nullptr);
}
-Method* Method::CreateFromArtMethod(Thread* self, mirror::ArtMethod* method) {
+Method* Method::CreateFromArtMethod(Thread* self, ArtMethod* method) {
DCHECK(!method->IsConstructor()) << PrettyMethod(method);
auto* ret = down_cast<Method*>(StaticClass()->AllocObject(self));
if (LIKELY(ret != nullptr)) {
@@ -90,7 +92,7 @@ void Constructor::VisitRoots(RootVisitor* visitor) {
array_class_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
}
-Constructor* Constructor::CreateFromArtMethod(Thread* self, mirror::ArtMethod* method) {
+Constructor* Constructor::CreateFromArtMethod(Thread* self, ArtMethod* method) {
DCHECK(method->IsConstructor()) << PrettyMethod(method);
auto* ret = down_cast<Constructor*>(StaticClass()->AllocObject(self));
if (LIKELY(ret != nullptr)) {
diff --git a/runtime/mirror/method.h b/runtime/mirror/method.h
index 88100f0..42c76c0 100644
--- a/runtime/mirror/method.h
+++ b/runtime/mirror/method.h
@@ -28,7 +28,7 @@ class Class;
// C++ mirror of java.lang.reflect.Method.
class MANAGED Method : public AbstractMethod {
public:
- static Method* CreateFromArtMethod(Thread* self, mirror::ArtMethod* method)
+ static Method* CreateFromArtMethod(Thread* self, ArtMethod* method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static mirror::Class* StaticClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -59,7 +59,7 @@ class MANAGED Method : public AbstractMethod {
// C++ mirror of java.lang.reflect.Constructor.
class MANAGED Constructor: public AbstractMethod {
public:
- static Constructor* CreateFromArtMethod(Thread* self, mirror::ArtMethod* method)
+ static Constructor* CreateFromArtMethod(Thread* self, ArtMethod* method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static mirror::Class* StaticClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 39d0f56..05c44e5 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -24,6 +24,7 @@
#include "atomic.h"
#include "array-inl.h"
#include "class.h"
+#include "class_linker.h"
#include "lock_word-inl.h"
#include "monitor.h"
#include "object_array-inl.h"
@@ -36,9 +37,9 @@
namespace art {
namespace mirror {
-inline uint32_t Object::ClassSize() {
+inline uint32_t Object::ClassSize(size_t pointer_size) {
uint32_t vtable_entries = kVTableLength;
- return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0);
+ return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0, pointer_size);
}
template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
@@ -253,18 +254,6 @@ inline bool Object::IsArrayInstance() {
template IsArrayClass<kVerifyFlags, kReadBarrierOption>();
}
-template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
-inline bool Object::IsArtMethod() {
- return GetClass<kVerifyFlags, kReadBarrierOption>()->
- template IsArtMethodClass<kReadBarrierOption>();
-}
-
-template<VerifyObjectFlags kVerifyFlags>
-inline ArtMethod* Object::AsArtMethod() {
- DCHECK(IsArtMethod<kVerifyFlags>());
- return down_cast<ArtMethod*>(this);
-}
-
template<VerifyObjectFlags kVerifyFlags>
inline bool Object::IsReferenceInstance() {
return GetClass<kVerifyFlags>()->IsTypeOfReferenceClass();
@@ -292,7 +281,7 @@ inline BooleanArray* Object::AsBooleanArray() {
template<VerifyObjectFlags kVerifyFlags>
inline ByteArray* Object::AsByteArray() {
- static const VerifyObjectFlags kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
+ constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
DCHECK(GetClass<kVerifyFlags>()->IsArrayClass());
DCHECK(GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveByte());
return down_cast<ByteArray*>(this);
@@ -300,7 +289,7 @@ inline ByteArray* Object::AsByteArray() {
template<VerifyObjectFlags kVerifyFlags>
inline ByteArray* Object::AsByteSizedArray() {
- constexpr VerifyObjectFlags kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
+ constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
DCHECK(GetClass<kVerifyFlags>()->IsArrayClass());
DCHECK(GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveByte() ||
GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveBoolean());
@@ -333,25 +322,41 @@ inline ShortArray* Object::AsShortSizedArray() {
}
template<VerifyObjectFlags kVerifyFlags>
-inline IntArray* Object::AsIntArray() {
+inline bool Object::IsIntArray() {
constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
- CHECK(GetClass<kVerifyFlags>()->IsArrayClass());
- CHECK(GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveInt() ||
- GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveFloat());
+ auto* component_type = GetClass<kVerifyFlags>()->GetComponentType();
+ return component_type != nullptr && component_type->template IsPrimitiveInt<kNewFlags>();
+}
+
+template<VerifyObjectFlags kVerifyFlags>
+inline IntArray* Object::AsIntArray() {
+ DCHECK(IsIntArray<kVerifyFlags>());
return down_cast<IntArray*>(this);
}
template<VerifyObjectFlags kVerifyFlags>
-inline LongArray* Object::AsLongArray() {
+inline bool Object::IsLongArray() {
constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
- CHECK(GetClass<kVerifyFlags>()->IsArrayClass());
- CHECK(GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveLong() ||
- GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveDouble());
+ auto* component_type = GetClass<kVerifyFlags>()->GetComponentType();
+ return component_type != nullptr && component_type->template IsPrimitiveLong<kNewFlags>();
+}
+
+template<VerifyObjectFlags kVerifyFlags>
+inline LongArray* Object::AsLongArray() {
+ DCHECK(IsLongArray<kVerifyFlags>());
return down_cast<LongArray*>(this);
}
template<VerifyObjectFlags kVerifyFlags>
+inline bool Object::IsFloatArray() {
+ constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
+ auto* component_type = GetClass<kVerifyFlags>()->GetComponentType();
+ return component_type != nullptr && component_type->template IsPrimitiveFloat<kNewFlags>();
+}
+
+template<VerifyObjectFlags kVerifyFlags>
inline FloatArray* Object::AsFloatArray() {
+ DCHECK(IsFloatArray<kVerifyFlags>());
constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
DCHECK(GetClass<kVerifyFlags>()->IsArrayClass());
DCHECK(GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveFloat());
@@ -359,7 +364,15 @@ inline FloatArray* Object::AsFloatArray() {
}
template<VerifyObjectFlags kVerifyFlags>
+inline bool Object::IsDoubleArray() {
+ constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
+ auto* component_type = GetClass<kVerifyFlags>()->GetComponentType();
+ return component_type != nullptr && component_type->template IsPrimitiveDouble<kNewFlags>();
+}
+
+template<VerifyObjectFlags kVerifyFlags>
inline DoubleArray* Object::AsDoubleArray() {
+ DCHECK(IsDoubleArray<kVerifyFlags>());
constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
DCHECK(GetClass<kVerifyFlags>()->IsArrayClass());
DCHECK(GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveDouble());
@@ -950,8 +963,11 @@ inline void Object::VisitFieldsReferences(uint32_t ref_offsets, const Visitor& v
if (num_reference_fields == 0u) {
continue;
}
+ // Presumably GC can happen when we are cross compiling, it should not cause performance
+ // problems to do pointer size logic.
MemberOffset field_offset = kIsStatic
- ? klass->GetFirstReferenceStaticFieldOffset()
+ ? klass->GetFirstReferenceStaticFieldOffset(
+ Runtime::Current()->GetClassLinker()->GetImagePointerSize())
: klass->GetFirstReferenceInstanceFieldOffset();
for (size_t i = 0; i < num_reference_fields; ++i) {
// TODO: Do a simpler check?
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index f9740bb..b177e2f 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -106,9 +106,8 @@ class CopyObjectVisitor {
: self_(self), orig_(orig), num_bytes_(num_bytes) {
}
- void operator()(Object* obj, size_t usable_size) const
+ void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- UNUSED(usable_size);
Object::CopyObject(self_, obj, orig_->Get(), num_bytes_);
}
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 5afe99f..60c756a 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -25,6 +25,7 @@
namespace art {
class ArtField;
+class ArtMethod;
class ImageWriter;
class LockWord;
class Monitor;
@@ -34,7 +35,6 @@ class VoidFunctor;
namespace mirror {
-class ArtMethod;
class Array;
class Class;
class FinalizerReference;
@@ -71,7 +71,7 @@ class MANAGED LOCKABLE Object {
static constexpr size_t kVTableLength = 11;
// The size of the java.lang.Class representing a java.lang.Object.
- static uint32_t ClassSize();
+ static uint32_t ClassSize(size_t pointer_size);
// Size of an instance of java.lang.Object.
static constexpr uint32_t InstanceSize() {
@@ -176,12 +176,22 @@ class MANAGED LOCKABLE Object {
ShortArray* AsShortSizedArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ bool IsIntArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
IntArray* AsIntArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ bool IsLongArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
LongArray* AsLongArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ bool IsFloatArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
FloatArray* AsFloatArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ bool IsDoubleArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
DoubleArray* AsDoubleArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -196,12 +206,6 @@ class MANAGED LOCKABLE Object {
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
Throwable* AsThrowable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
- ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool IsArtMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ArtMethod* AsArtMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsReferenceInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
@@ -429,7 +433,7 @@ class MANAGED LOCKABLE Object {
field_offset, static_cast<int32_t>(ptr));
} else {
SetField64<kTransactionActive, kCheckTransaction, kVerifyFlags>(
- field_offset, static_cast<int64_t>(reinterpret_cast<intptr_t>(new_value)));
+ field_offset, static_cast<int64_t>(reinterpret_cast<uintptr_t>(new_value)));
}
}
// TODO fix thread safety analysis broken by the use of template. This should be
@@ -463,8 +467,8 @@ class MANAGED LOCKABLE Object {
} else {
int64_t v = GetField64<kVerifyFlags, kIsVolatile>(field_offset);
// Check that we dont lose any non 0 bits.
- DCHECK_EQ(reinterpret_cast<int64_t>(reinterpret_cast<T>(v)), v);
- return reinterpret_cast<T>(v);
+ DCHECK_EQ(static_cast<int64_t>(static_cast<uintptr_t>(v)), v);
+ return reinterpret_cast<T>(static_cast<uintptr_t>(v));
}
}
diff --git a/runtime/mirror/object_array.h b/runtime/mirror/object_array.h
index 6404faf..5eddc18 100644
--- a/runtime/mirror/object_array.h
+++ b/runtime/mirror/object_array.h
@@ -26,8 +26,8 @@ template<class T>
class MANAGED ObjectArray: public Array {
public:
// The size of Object[].class.
- static uint32_t ClassSize() {
- return Array::ClassSize();
+ static uint32_t ClassSize(size_t pointer_size) {
+ return Array::ClassSize(pointer_size);
}
static ObjectArray<T>* Alloc(Thread* self, Class* object_array_class, int32_t length,
diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc
index 8e50a7a..85ea28f 100644
--- a/runtime/mirror/object_test.cc
+++ b/runtime/mirror/object_test.cc
@@ -77,9 +77,9 @@ class ObjectTest : public CommonRuntimeTest {
TEST_F(ObjectTest, Constants) {
EXPECT_EQ(kObjectReferenceSize, sizeof(HeapReference<Object>));
EXPECT_EQ(kObjectHeaderSize, sizeof(Object));
- EXPECT_EQ(MIRROR_ART_METHOD_QUICK_CODE_OFFSET_32,
+ EXPECT_EQ(ART_METHOD_QUICK_CODE_OFFSET_32,
ArtMethod::EntryPointFromQuickCompiledCodeOffset(4).Int32Value());
- EXPECT_EQ(MIRROR_ART_METHOD_QUICK_CODE_OFFSET_64,
+ EXPECT_EQ(ART_METHOD_QUICK_CODE_OFFSET_64,
ArtMethod::EntryPointFromQuickCompiledCodeOffset(8).Int32Value());
}
@@ -306,7 +306,7 @@ TEST_F(ObjectTest, CheckAndAllocArrayFromCode) {
// pretend we are trying to call 'new char[3]' from String.toCharArray
ScopedObjectAccess soa(Thread::Current());
Class* java_util_Arrays = class_linker_->FindSystemClass(soa.Self(), "Ljava/util/Arrays;");
- ArtMethod* sort = java_util_Arrays->FindDirectMethod("sort", "([I)V");
+ ArtMethod* sort = java_util_Arrays->FindDirectMethod("sort", "([I)V", sizeof(void*));
const DexFile::StringId* string_id = java_lang_dex_file_->FindStringId("[I");
ASSERT_TRUE(string_id != nullptr);
const DexFile::TypeId* type_id = java_lang_dex_file_->FindTypeId(
@@ -366,7 +366,7 @@ TEST_F(ObjectTest, StaticFieldFromCode) {
StackHandleScope<2> hs(soa.Self());
Handle<mirror::ClassLoader> loader(hs.NewHandle(soa.Decode<ClassLoader*>(class_loader)));
Class* klass = class_linker_->FindClass(soa.Self(), "LStaticsFromCode;", loader);
- ArtMethod* clinit = klass->FindClassInitializer();
+ ArtMethod* clinit = klass->FindClassInitializer(sizeof(void*));
const DexFile::StringId* klass_string_id = dex_file->FindStringId("LStaticsFromCode;");
ASSERT_TRUE(klass_string_id != nullptr);
const DexFile::TypeId* klass_type_id = dex_file->FindTypeId(
@@ -508,22 +508,22 @@ TEST_F(ObjectTest, DescriptorCompare) {
Class* klass2 = linker->FindClass(soa.Self(), "LProtoCompare2;", class_loader_2);
ASSERT_TRUE(klass2 != nullptr);
- ArtMethod* m1_1 = klass1->GetVirtualMethod(0);
+ ArtMethod* m1_1 = klass1->GetVirtualMethod(0, sizeof(void*));
EXPECT_STREQ(m1_1->GetName(), "m1");
- ArtMethod* m2_1 = klass1->GetVirtualMethod(1);
+ ArtMethod* m2_1 = klass1->GetVirtualMethod(1, sizeof(void*));
EXPECT_STREQ(m2_1->GetName(), "m2");
- ArtMethod* m3_1 = klass1->GetVirtualMethod(2);
+ ArtMethod* m3_1 = klass1->GetVirtualMethod(2, sizeof(void*));
EXPECT_STREQ(m3_1->GetName(), "m3");
- ArtMethod* m4_1 = klass1->GetVirtualMethod(3);
+ ArtMethod* m4_1 = klass1->GetVirtualMethod(3, sizeof(void*));
EXPECT_STREQ(m4_1->GetName(), "m4");
- ArtMethod* m1_2 = klass2->GetVirtualMethod(0);
+ ArtMethod* m1_2 = klass2->GetVirtualMethod(0, sizeof(void*));
EXPECT_STREQ(m1_2->GetName(), "m1");
- ArtMethod* m2_2 = klass2->GetVirtualMethod(1);
+ ArtMethod* m2_2 = klass2->GetVirtualMethod(1, sizeof(void*));
EXPECT_STREQ(m2_2->GetName(), "m2");
- ArtMethod* m3_2 = klass2->GetVirtualMethod(2);
+ ArtMethod* m3_2 = klass2->GetVirtualMethod(2, sizeof(void*));
EXPECT_STREQ(m3_2->GetName(), "m3");
- ArtMethod* m4_2 = klass2->GetVirtualMethod(3);
+ ArtMethod* m4_2 = klass2->GetVirtualMethod(3, sizeof(void*));
EXPECT_STREQ(m4_2->GetName(), "m4");
}
diff --git a/runtime/mirror/reference-inl.h b/runtime/mirror/reference-inl.h
index d1d2a3a..01e99b9 100644
--- a/runtime/mirror/reference-inl.h
+++ b/runtime/mirror/reference-inl.h
@@ -22,9 +22,9 @@
namespace art {
namespace mirror {
-inline uint32_t Reference::ClassSize() {
+inline uint32_t Reference::ClassSize(size_t pointer_size) {
uint32_t vtable_entries = Object::kVTableLength + 5;
- return Class::ComputeClassSize(false, vtable_entries, 2, 0, 0, 0, 0);
+ return Class::ComputeClassSize(false, vtable_entries, 2, 0, 0, 0, 0, pointer_size);
}
inline bool Reference::IsEnqueuable() {
diff --git a/runtime/mirror/reference.cc b/runtime/mirror/reference.cc
index 70bcf92..3c7f8c8 100644
--- a/runtime/mirror/reference.cc
+++ b/runtime/mirror/reference.cc
@@ -16,7 +16,7 @@
#include "reference.h"
-#include "mirror/art_method.h"
+#include "art_method.h"
#include "gc_root-inl.h"
namespace art {
diff --git a/runtime/mirror/reference.h b/runtime/mirror/reference.h
index c11d79d..4bbdb99 100644
--- a/runtime/mirror/reference.h
+++ b/runtime/mirror/reference.h
@@ -42,7 +42,7 @@ namespace mirror {
class MANAGED Reference : public Object {
public:
// Size of java.lang.ref.Reference.class.
- static uint32_t ClassSize();
+ static uint32_t ClassSize(size_t pointer_size);
// Size of an instance of java.lang.ref.Reference.
static constexpr uint32_t InstanceSize() {
diff --git a/runtime/mirror/string-inl.h b/runtime/mirror/string-inl.h
index 35b8aef..9f6cd11 100644
--- a/runtime/mirror/string-inl.h
+++ b/runtime/mirror/string-inl.h
@@ -30,9 +30,9 @@
namespace art {
namespace mirror {
-inline uint32_t String::ClassSize() {
+inline uint32_t String::ClassSize(size_t pointer_size) {
uint32_t vtable_entries = Object::kVTableLength + 52;
- return Class::ComputeClassSize(true, vtable_entries, 0, 1, 0, 1, 2);
+ return Class::ComputeClassSize(true, vtable_entries, 0, 1, 0, 1, 2, pointer_size);
}
// Sets string count in the allocation code path to ensure it is guarded by a CAS.
diff --git a/runtime/mirror/string.h b/runtime/mirror/string.h
index fcfe976..a8f16d7 100644
--- a/runtime/mirror/string.h
+++ b/runtime/mirror/string.h
@@ -34,7 +34,7 @@ namespace mirror {
class MANAGED String FINAL : public Object {
public:
// Size of java.lang.String.class.
- static uint32_t ClassSize();
+ static uint32_t ClassSize(size_t pointer_size);
// Size of an instance of java.lang.String not including its value array.
static constexpr uint32_t InstanceSize() {
diff --git a/runtime/mirror/throwable.cc b/runtime/mirror/throwable.cc
index 782b9c0..1c21edb 100644
--- a/runtime/mirror/throwable.cc
+++ b/runtime/mirror/throwable.cc
@@ -71,9 +71,18 @@ bool Throwable::IsCheckedException() {
int32_t Throwable::GetStackDepth() {
Object* stack_state = GetStackState();
- if (stack_state == nullptr || !stack_state->IsObjectArray()) return -1;
- ObjectArray<Object>* method_trace = down_cast<ObjectArray<Object>*>(stack_state);
- return method_trace->GetLength() - 1;
+ if (stack_state == nullptr) {
+ return -1;
+ }
+ if (!stack_state->IsIntArray() && !stack_state->IsLongArray()) {
+ return -1;
+ }
+ mirror::PointerArray* method_trace = down_cast<mirror::PointerArray*>(stack_state->AsArray());
+ int32_t array_len = method_trace->GetLength();
+ // The format is [method pointers][pcs] so the depth is half the length (see method
+ // BuildInternalStackTraceVisitor::Init).
+ CHECK_EQ(array_len % 2, 0);
+ return array_len / 2;
}
std::string Throwable::Dump() {
@@ -86,17 +95,21 @@ std::string Throwable::Dump() {
result += "\n";
Object* stack_state = GetStackState();
// check stack state isn't missing or corrupt
- if (stack_state != nullptr && stack_state->IsObjectArray()) {
+ if (stack_state != nullptr &&
+ (stack_state->IsIntArray() || stack_state->IsLongArray())) {
// Decode the internal stack trace into the depth and method trace
- ObjectArray<Object>* method_trace = down_cast<ObjectArray<Object>*>(stack_state);
- int32_t depth = method_trace->GetLength() - 1;
- IntArray* pc_trace = down_cast<IntArray*>(method_trace->Get(depth));
+ // Format is [method pointers][pcs]
+ auto* method_trace = down_cast<mirror::PointerArray*>(stack_state->AsArray());
+ auto array_len = method_trace->GetLength();
+ CHECK_EQ(array_len % 2, 0);
+ const auto depth = array_len / 2;
if (depth == 0) {
result += "(Throwable with empty stack trace)";
} else {
+ auto ptr_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
for (int32_t i = 0; i < depth; ++i) {
- mirror::ArtMethod* method = down_cast<ArtMethod*>(method_trace->Get(i));
- uint32_t dex_pc = pc_trace->Get(i);
+ ArtMethod* method = method_trace->GetElementPtrSize<ArtMethod*>(i, ptr_size);
+ uintptr_t dex_pc = method_trace->GetElementPtrSize<uintptr_t>(i + depth, ptr_size);
int32_t line_number = method->GetLineNumFromDexPC(dex_pc);
const char* source_file = method->GetDeclaringClassSourceFile();
result += StringPrintf(" at %s (%s:%d)\n", PrettyMethod(method, true).c_str(),
@@ -108,8 +121,7 @@ std::string Throwable::Dump() {
if (stack_trace != nullptr && stack_trace->IsObjectArray()) {
CHECK_EQ(stack_trace->GetClass()->GetComponentType(),
StackTraceElement::GetStackTraceElement());
- ObjectArray<StackTraceElement>* ste_array =
- down_cast<ObjectArray<StackTraceElement>*>(stack_trace);
+ auto* ste_array = down_cast<ObjectArray<StackTraceElement>*>(stack_trace);
if (ste_array->GetLength() == 0) {
result += "(Throwable with empty stack trace)";
} else {
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index dc016a5..4be25d6 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -21,6 +21,7 @@
#include <cutils/trace.h>
#include <vector>
+#include "art_method-inl.h"
#include "base/mutex.h"
#include "base/stl_util.h"
#include "base/time_utils.h"
@@ -28,7 +29,6 @@
#include "dex_file-inl.h"
#include "dex_instruction.h"
#include "lock_word-inl.h"
-#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
@@ -245,7 +245,7 @@ void Monitor::Lock(Thread* self) {
// Contended.
const bool log_contention = (lock_profiling_threshold_ != 0);
uint64_t wait_start_ms = log_contention ? MilliTime() : 0;
- mirror::ArtMethod* owners_method = locking_method_;
+ ArtMethod* owners_method = locking_method_;
uint32_t owners_dex_pc = locking_dex_pc_;
// Do this before releasing the lock so that we don't get deflated.
size_t num_waiters = num_waiters_;
@@ -449,7 +449,7 @@ void Monitor::Wait(Thread* self, int64_t ms, int32_t ns,
int prev_lock_count = lock_count_;
lock_count_ = 0;
owner_ = nullptr;
- mirror::ArtMethod* saved_method = locking_method_;
+ ArtMethod* saved_method = locking_method_;
locking_method_ = nullptr;
uintptr_t saved_dex_pc = locking_dex_pc_;
locking_dex_pc_ = 0;
@@ -994,14 +994,15 @@ mirror::Object* Monitor::GetContendedMonitor(Thread* thread) {
void Monitor::VisitLocks(StackVisitor* stack_visitor, void (*callback)(mirror::Object*, void*),
void* callback_context, bool abort_on_failure) {
- mirror::ArtMethod* m = stack_visitor->GetMethod();
+ ArtMethod* m = stack_visitor->GetMethod();
CHECK(m != nullptr);
// Native methods are an easy special case.
// TODO: use the JNI implementation's table of explicit MonitorEnter calls and dump those too.
if (m->IsNative()) {
if (m->IsSynchronized()) {
- mirror::Object* jni_this = stack_visitor->GetCurrentHandleScope()->GetReference(0);
+ mirror::Object* jni_this =
+ stack_visitor->GetCurrentHandleScope(sizeof(void*))->GetReference(0);
callback(jni_this, callback_context);
}
return;
@@ -1087,7 +1088,7 @@ bool Monitor::IsLocked() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return owner_ != nullptr;
}
-void Monitor::TranslateLocation(mirror::ArtMethod* method, uint32_t dex_pc,
+void Monitor::TranslateLocation(ArtMethod* method, uint32_t dex_pc,
const char** source_file, uint32_t* line_number) const {
// If method is null, location is unknown
if (method == nullptr) {
diff --git a/runtime/monitor.h b/runtime/monitor.h
index b7245c1..8f3a91d 100644
--- a/runtime/monitor.h
+++ b/runtime/monitor.h
@@ -36,14 +36,14 @@
namespace art {
+class ArtMethod;
class LockWord;
template<class T> class Handle;
-class Thread;
class StackVisitor;
+class Thread;
typedef uint32_t MonitorId;
namespace mirror {
- class ArtMethod;
class Object;
} // namespace mirror
@@ -226,7 +226,7 @@ class Monitor {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Translates the provided method and pc into its declaring class' source file and line number.
- void TranslateLocation(mirror::ArtMethod* method, uint32_t pc,
+ void TranslateLocation(ArtMethod* method, uint32_t pc,
const char** source_file, uint32_t* line_number) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -262,7 +262,7 @@ class Monitor {
// Method and dex pc where the lock owner acquired the lock, used when lock
// sampling is enabled. locking_method_ may be null if the lock is currently
// unlocked, or if the lock is acquired by the system when the stack is empty.
- mirror::ArtMethod* locking_method_ GUARDED_BY(monitor_lock_);
+ ArtMethod* locking_method_ GUARDED_BY(monitor_lock_);
uint32_t locking_dex_pc_ GUARDED_BY(monitor_lock_);
// The denser encoded version of this monitor as stored in the lock word.
diff --git a/runtime/monitor_android.cc b/runtime/monitor_android.cc
index 48c9cce..efe2e82 100644
--- a/runtime/monitor_android.cc
+++ b/runtime/monitor_android.cc
@@ -78,7 +78,7 @@ void Monitor::LogContentionEvent(Thread* self, uint32_t wait_ms, uint32_t sample
// Emit the source code file name, <= 37 bytes.
uint32_t pc;
- mirror::ArtMethod* m = self->GetCurrentMethod(&pc);
+ ArtMethod* m = self->GetCurrentMethod(&pc);
const char* filename;
uint32_t line_number;
TranslateLocation(m, pc, &filename, &line_number);
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index a172197..5dd354d 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -27,6 +27,7 @@ extern "C" void android_set_application_target_sdk_version(uint32_t version);
#include "toStringArray.h"
#pragma GCC diagnostic pop
+#include "art_method-inl.h"
#include "arch/instruction_set.h"
#include "class_linker-inl.h"
#include "common_throws.h"
@@ -40,7 +41,6 @@ extern "C" void android_set_application_target_sdk_version(uint32_t version);
#include "gc/task_processor.h"
#include "intern_table.h"
#include "jni_internal.h"
-#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache-inl.h"
#include "mirror/object-inl.h"
@@ -350,7 +350,7 @@ static void PreloadDexCachesResolveField(Handle<mirror::DexCache> dex_cache, uin
static void PreloadDexCachesResolveMethod(Handle<mirror::DexCache> dex_cache, uint32_t method_idx,
InvokeType invoke_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod* method = dex_cache->GetResolvedMethod(method_idx);
+ ArtMethod* method = dex_cache->GetResolvedMethod(method_idx, sizeof(void*));
if (method != nullptr) {
return;
}
@@ -363,14 +363,14 @@ static void PreloadDexCachesResolveMethod(Handle<mirror::DexCache> dex_cache, ui
switch (invoke_type) {
case kDirect:
case kStatic:
- method = klass->FindDirectMethod(dex_cache.Get(), method_idx);
+ method = klass->FindDirectMethod(dex_cache.Get(), method_idx, sizeof(void*));
break;
case kInterface:
- method = klass->FindInterfaceMethod(dex_cache.Get(), method_idx);
+ method = klass->FindInterfaceMethod(dex_cache.Get(), method_idx, sizeof(void*));
break;
case kSuper:
case kVirtual:
- method = klass->FindVirtualMethod(dex_cache.Get(), method_idx);
+ method = klass->FindVirtualMethod(dex_cache.Get(), method_idx, sizeof(void*));
break;
default:
LOG(FATAL) << "Unreachable - invocation type: " << invoke_type;
@@ -380,7 +380,7 @@ static void PreloadDexCachesResolveMethod(Handle<mirror::DexCache> dex_cache, ui
return;
}
// LOG(INFO) << "VMRuntime.preloadDexCaches resolved method " << PrettyMethod(method);
- dex_cache->SetResolvedMethod(method_idx, method);
+ dex_cache->SetResolvedMethod(method_idx, method, sizeof(void*));
}
struct DexCacheStats {
@@ -452,7 +452,7 @@ static void PreloadDexCachesStatsFilled(DexCacheStats* filled)
}
}
for (size_t j = 0; j < dex_cache->NumResolvedMethods(); j++) {
- mirror::ArtMethod* method = dex_cache->GetResolvedMethod(j);
+ ArtMethod* method = dex_cache->GetResolvedMethod(j, sizeof(void*));
if (method != nullptr) {
filled->num_methods++;
}
diff --git a/runtime/native/dalvik_system_VMStack.cc b/runtime/native/dalvik_system_VMStack.cc
index 1d7d853..ee62755 100644
--- a/runtime/native/dalvik_system_VMStack.cc
+++ b/runtime/native/dalvik_system_VMStack.cc
@@ -16,9 +16,9 @@
#include "dalvik_system_VMStack.h"
+#include "art_method-inl.h"
#include "jni_internal.h"
#include "nth_caller_visitor.h"
-#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/object-inl.h"
@@ -90,10 +90,13 @@ static jobject VMStack_getClosestUserClassLoader(JNIEnv* env, jclass) {
bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(class_loader == nullptr);
mirror::Class* c = GetMethod()->GetDeclaringClass();
- mirror::Object* cl = c->GetClassLoader();
- if (cl != nullptr) {
- class_loader = cl;
- return false;
+ // c is null for runtime methods.
+ if (c != nullptr) {
+ mirror::Object* cl = c->GetClassLoader();
+ if (cl != nullptr) {
+ class_loader = cl;
+ return false;
+ }
}
return true;
}
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index 795a0ea..94024ef 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -273,7 +273,7 @@ static jobject Class_getDeclaredConstructorInternal(
return nullptr;
}
-static ALWAYS_INLINE inline bool MethodMatchesConstructor(mirror::ArtMethod* m, bool public_only)
+static ALWAYS_INLINE inline bool MethodMatchesConstructor(ArtMethod* m, bool public_only)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(m != nullptr);
return (!public_only || m->IsPublic()) && !m->IsStatic() && m->IsConstructor();
@@ -283,14 +283,11 @@ static jobjectArray Class_getDeclaredConstructorsInternal(
JNIEnv* env, jobject javaThis, jboolean publicOnly) {
ScopedFastNativeObjectAccess soa(env);
auto* klass = DecodeClass(soa, javaThis);
- StackHandleScope<2> hs(soa.Self());
- auto h_direct_methods = hs.NewHandle(klass->GetDirectMethods());
+ StackHandleScope<1> hs(soa.Self());
size_t constructor_count = 0;
- auto count = h_direct_methods.Get() != nullptr ? h_direct_methods->GetLength() : 0u;
// Two pass approach for speed.
- for (size_t i = 0; i < count; ++i) {
- constructor_count += MethodMatchesConstructor(h_direct_methods->GetWithoutChecks(i),
- publicOnly != JNI_FALSE) ? 1u : 0u;
+ for (auto& m : klass->GetDirectMethods(sizeof(void*))) {
+ constructor_count += MethodMatchesConstructor(&m, publicOnly != JNI_FALSE) ? 1u : 0u;
}
auto h_constructors = hs.NewHandle(mirror::ObjectArray<mirror::Constructor>::Alloc(
soa.Self(), mirror::Constructor::ArrayClass(), constructor_count));
@@ -299,12 +296,11 @@ static jobjectArray Class_getDeclaredConstructorsInternal(
return nullptr;
}
constructor_count = 0;
- for (size_t i = 0; i < count; ++i) {
- auto* method = h_direct_methods->GetWithoutChecks(i);
- if (MethodMatchesConstructor(method, publicOnly != JNI_FALSE)) {
- auto* constructor = mirror::Constructor::CreateFromArtMethod(soa.Self(), method);
+ for (auto& m : klass->GetDirectMethods(sizeof(void*))) {
+ if (MethodMatchesConstructor(&m, publicOnly != JNI_FALSE)) {
+ auto* constructor = mirror::Constructor::CreateFromArtMethod(soa.Self(), &m);
if (UNLIKELY(constructor == nullptr)) {
- soa.Self()->AssertPendingException();
+ soa.Self()->AssertPendingOOMException();
return nullptr;
}
h_constructors->SetWithoutChecks<false>(constructor_count++, constructor);
@@ -323,7 +319,7 @@ static jobject Class_getDeclaredMethodInternal(JNIEnv* env, jobject javaThis,
// were synthesized by the runtime.
constexpr uint32_t kSkipModifiers = kAccMiranda | kAccSynthetic;
ScopedFastNativeObjectAccess soa(env);
- StackHandleScope<5> hs(soa.Self());
+ StackHandleScope<4> hs(soa.Self());
auto h_method_name = hs.NewHandle(soa.Decode<mirror::String*>(name));
if (UNLIKELY(h_method_name.Get() == nullptr)) {
ThrowNullPointerException("name == null");
@@ -331,60 +327,49 @@ static jobject Class_getDeclaredMethodInternal(JNIEnv* env, jobject javaThis,
}
auto h_args = hs.NewHandle(soa.Decode<mirror::ObjectArray<mirror::Class>*>(args));
auto* klass = DecodeClass(soa, javaThis);
- mirror::ArtMethod* result = nullptr;
- auto* virtual_methods = klass->GetVirtualMethods();
- if (virtual_methods != nullptr) {
- auto h_virtual_methods = hs.NewHandle(virtual_methods);
- for (size_t i = 0, count = virtual_methods->GetLength(); i < count; ++i) {
- auto* m = h_virtual_methods->GetWithoutChecks(i);
- auto* np_method = m->GetInterfaceMethodIfProxy();
+ ArtMethod* result = nullptr;
+ for (auto& m : klass->GetVirtualMethods(sizeof(void*))) {
+ auto* np_method = m.GetInterfaceMethodIfProxy(sizeof(void*));
+ // May cause thread suspension.
+ mirror::String* np_name = np_method->GetNameAsString(soa.Self());
+ if (!np_name->Equals(h_method_name.Get()) || !np_method->EqualParameters(h_args)) {
+ if (UNLIKELY(soa.Self()->IsExceptionPending())) {
+ return nullptr;
+ }
+ continue;
+ }
+ auto modifiers = m.GetAccessFlags();
+ if ((modifiers & kSkipModifiers) == 0) {
+ return soa.AddLocalReference<jobject>(mirror::Method::CreateFromArtMethod(soa.Self(), &m));
+ }
+ if ((modifiers & kAccMiranda) == 0) {
+ result = &m; // Remember as potential result if it's not a miranda method.
+ }
+ }
+ if (result == nullptr) {
+ for (auto& m : klass->GetDirectMethods(sizeof(void*))) {
+ auto modifiers = m.GetAccessFlags();
+ if ((modifiers & kAccConstructor) != 0) {
+ continue;
+ }
+ auto* np_method = m.GetInterfaceMethodIfProxy(sizeof(void*));
// May cause thread suspension.
mirror::String* np_name = np_method->GetNameAsString(soa.Self());
+ if (np_name == nullptr) {
+ soa.Self()->AssertPendingException();
+ return nullptr;
+ }
if (!np_name->Equals(h_method_name.Get()) || !np_method->EqualParameters(h_args)) {
if (UNLIKELY(soa.Self()->IsExceptionPending())) {
return nullptr;
}
continue;
}
- auto modifiers = m->GetAccessFlags();
if ((modifiers & kSkipModifiers) == 0) {
- return soa.AddLocalReference<jobject>(mirror::Method::CreateFromArtMethod(soa.Self(), m));
- }
- if ((modifiers & kAccMiranda) == 0) {
- result = m; // Remember as potential result if it's not a miranda method.
- }
- }
- }
- if (result == nullptr) {
- auto* direct_methods = klass->GetDirectMethods();
- if (direct_methods != nullptr) {
- auto h_direct_methods = hs.NewHandle(direct_methods);
- for (size_t i = 0, count = direct_methods->GetLength(); i < count; ++i) {
- auto* m = h_direct_methods->GetWithoutChecks(i);
- auto modifiers = m->GetAccessFlags();
- if ((modifiers & kAccConstructor) != 0) {
- continue;
- }
- auto* np_method = m->GetInterfaceMethodIfProxy();
- // May cause thread suspension.
- mirror::String* np_name = np_method ->GetNameAsString(soa.Self());
- if (np_name == nullptr) {
- soa.Self()->AssertPendingException();
- return nullptr;
- }
- if (!np_name->Equals(h_method_name.Get()) || !np_method->EqualParameters(h_args)) {
- if (UNLIKELY(soa.Self()->IsExceptionPending())) {
- return nullptr;
- }
- continue;
- }
- if ((modifiers & kSkipModifiers) == 0) {
- return soa.AddLocalReference<jobject>(mirror::Method::CreateFromArtMethod(
- soa.Self(), m));
- }
- // Direct methods cannot be miranda methods, so this potential result must be synthetic.
- result = m;
+ return soa.AddLocalReference<jobject>(mirror::Method::CreateFromArtMethod(soa.Self(), &m));
}
+ // Direct methods cannot be miranda methods, so this potential result must be synthetic.
+ result = &m;
}
}
return result != nullptr ?
@@ -395,64 +380,50 @@ static jobject Class_getDeclaredMethodInternal(JNIEnv* env, jobject javaThis,
static jobjectArray Class_getDeclaredMethodsUnchecked(JNIEnv* env, jobject javaThis,
jboolean publicOnly) {
ScopedFastNativeObjectAccess soa(env);
- StackHandleScope<5> hs(soa.Self());
+ StackHandleScope<3> hs(soa.Self());
auto* klass = DecodeClass(soa, javaThis);
- auto virtual_methods = hs.NewHandle(klass->GetVirtualMethods());
- auto direct_methods = hs.NewHandle(klass->GetDirectMethods());
size_t num_methods = 0;
- if (virtual_methods.Get() != nullptr) {
- for (size_t i = 0, count = virtual_methods->GetLength(); i < count; ++i) {
- auto* m = virtual_methods->GetWithoutChecks(i);
- auto modifiers = m->GetAccessFlags();
- if ((publicOnly == JNI_FALSE || (modifiers & kAccPublic) != 0) &&
- (modifiers & kAccMiranda) == 0) {
- ++num_methods;
- }
+ for (auto& m : klass->GetVirtualMethods(sizeof(void*))) {
+ auto modifiers = m.GetAccessFlags();
+ if ((publicOnly == JNI_FALSE || (modifiers & kAccPublic) != 0) &&
+ (modifiers & kAccMiranda) == 0) {
+ ++num_methods;
}
}
- if (direct_methods.Get() != nullptr) {
- for (size_t i = 0, count = direct_methods->GetLength(); i < count; ++i) {
- auto* m = direct_methods->GetWithoutChecks(i);
- auto modifiers = m->GetAccessFlags();
- // Add non-constructor direct/static methods.
- if ((publicOnly == JNI_FALSE || (modifiers & kAccPublic) != 0) &&
- (modifiers & kAccConstructor) == 0) {
- ++num_methods;
- }
+ for (auto& m : klass->GetDirectMethods(sizeof(void*))) {
+ auto modifiers = m.GetAccessFlags();
+ // Add non-constructor direct/static methods.
+ if ((publicOnly == JNI_FALSE || (modifiers & kAccPublic) != 0) &&
+ (modifiers & kAccConstructor) == 0) {
+ ++num_methods;
}
}
auto ret = hs.NewHandle(mirror::ObjectArray<mirror::Method>::Alloc(
soa.Self(), mirror::Method::ArrayClass(), num_methods));
num_methods = 0;
- if (virtual_methods.Get() != nullptr) {
- for (size_t i = 0, count = virtual_methods->GetLength(); i < count; ++i) {
- auto* m = virtual_methods->GetWithoutChecks(i);
- auto modifiers = m->GetAccessFlags();
- if ((publicOnly == JNI_FALSE || (modifiers & kAccPublic) != 0) &&
- (modifiers & kAccMiranda) == 0) {
- auto* method = mirror::Method::CreateFromArtMethod(soa.Self(), m);
- if (method == nullptr) {
- soa.Self()->AssertPendingException();
- return nullptr;
- }
- ret->SetWithoutChecks<false>(num_methods++, method);
+ for (auto& m : klass->GetVirtualMethods(sizeof(void*))) {
+ auto modifiers = m.GetAccessFlags();
+ if ((publicOnly == JNI_FALSE || (modifiers & kAccPublic) != 0) &&
+ (modifiers & kAccMiranda) == 0) {
+ auto* method = mirror::Method::CreateFromArtMethod(soa.Self(), &m);
+ if (method == nullptr) {
+ soa.Self()->AssertPendingException();
+ return nullptr;
}
+ ret->SetWithoutChecks<false>(num_methods++, method);
}
}
- if (direct_methods.Get() != nullptr) {
- for (size_t i = 0, count = direct_methods->GetLength(); i < count; ++i) {
- auto* m = direct_methods->GetWithoutChecks(i);
- auto modifiers = m->GetAccessFlags();
- // Add non-constructor direct/static methods.
- if ((publicOnly == JNI_FALSE || (modifiers & kAccPublic) != 0) &&
- (modifiers & kAccConstructor) == 0) {
- auto* method = mirror::Method::CreateFromArtMethod(soa.Self(), m);
- if (method == nullptr) {
- soa.Self()->AssertPendingException();
- return nullptr;
- }
- ret->SetWithoutChecks<false>(num_methods++, method);
+ for (auto& m : klass->GetDirectMethods(sizeof(void*))) {
+ auto modifiers = m.GetAccessFlags();
+ // Add non-constructor direct/static methods.
+ if ((publicOnly == JNI_FALSE || (modifiers & kAccPublic) != 0) &&
+ (modifiers & kAccConstructor) == 0) {
+ auto* method = mirror::Method::CreateFromArtMethod(soa.Self(), &m);
+ if (method == nullptr) {
+ soa.Self()->AssertPendingException();
+ return nullptr;
}
+ ret->SetWithoutChecks<false>(num_methods++, method);
}
}
return soa.AddLocalReference<jobjectArray>(ret.Get());
diff --git a/runtime/native/java_lang_DexCache.cc b/runtime/native/java_lang_DexCache.cc
index b9f8d01..a2d9797 100644
--- a/runtime/native/java_lang_DexCache.cc
+++ b/runtime/native/java_lang_DexCache.cc
@@ -18,6 +18,7 @@
#include "dex_file.h"
#include "jni_internal.h"
+#include "mirror/class-inl.h"
#include "mirror/dex_cache-inl.h"
#include "mirror/object-inl.h"
#include "scoped_fast_native_object_access.h"
diff --git a/runtime/native/java_lang_reflect_Constructor.cc b/runtime/native/java_lang_reflect_Constructor.cc
index 40d6584..9db47d8 100644
--- a/runtime/native/java_lang_reflect_Constructor.cc
+++ b/runtime/native/java_lang_reflect_Constructor.cc
@@ -16,10 +16,9 @@
#include "java_lang_reflect_Constructor.h"
+#include "art_method-inl.h"
#include "class_linker.h"
#include "jni_internal.h"
-#include "mirror/art_method.h"
-#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/method.h"
#include "mirror/object-inl.h"
diff --git a/runtime/native/java_lang_reflect_Field.cc b/runtime/native/java_lang_reflect_Field.cc
index d6aa9b5..ba898c6 100644
--- a/runtime/native/java_lang_reflect_Field.cc
+++ b/runtime/native/java_lang_reflect_Field.cc
@@ -21,7 +21,6 @@
#include "common_throws.h"
#include "dex_file-inl.h"
#include "jni_internal.h"
-#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/field.h"
#include "reflection-inl.h"
diff --git a/runtime/native/java_lang_reflect_Method.cc b/runtime/native/java_lang_reflect_Method.cc
index c20d832..9533b4d 100644
--- a/runtime/native/java_lang_reflect_Method.cc
+++ b/runtime/native/java_lang_reflect_Method.cc
@@ -16,10 +16,9 @@
#include "java_lang_reflect_Method.h"
+#include "art_method-inl.h"
#include "class_linker.h"
#include "jni_internal.h"
-#include "mirror/art_method.h"
-#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
@@ -37,16 +36,17 @@ static jobject Method_invoke(JNIEnv* env, jobject javaMethod, jobject javaReceiv
static jobject Method_getExceptionTypesNative(JNIEnv* env, jobject javaMethod) {
ScopedFastNativeObjectAccess soa(env);
- mirror::ArtMethod* proxy_method = mirror::ArtMethod::FromReflectedMethod(soa, javaMethod);
+ ArtMethod* proxy_method = ArtMethod::FromReflectedMethod(soa, javaMethod);
CHECK(proxy_method->GetDeclaringClass()->IsProxyClass());
mirror::Class* proxy_class = proxy_method->GetDeclaringClass();
int throws_index = -1;
- size_t num_virt_methods = proxy_class->NumVirtualMethods();
- for (size_t i = 0; i < num_virt_methods; i++) {
- if (proxy_class->GetVirtualMethod(i) == proxy_method) {
+ size_t i = 0;
+ for (const auto& m : proxy_class->GetVirtualMethods(sizeof(void*))) {
+ if (&m == proxy_method) {
throws_index = i;
break;
}
+ ++i;
}
CHECK_NE(throws_index, -1);
mirror::ObjectArray<mirror::Class>* declared_exceptions =
diff --git a/runtime/native/scoped_fast_native_object_access.h b/runtime/native/scoped_fast_native_object_access.h
index dfabff5..57b873b 100644
--- a/runtime/native/scoped_fast_native_object_access.h
+++ b/runtime/native/scoped_fast_native_object_access.h
@@ -17,7 +17,7 @@
#ifndef ART_RUNTIME_NATIVE_SCOPED_FAST_NATIVE_OBJECT_ACCESS_H_
#define ART_RUNTIME_NATIVE_SCOPED_FAST_NATIVE_OBJECT_ACCESS_H_
-#include "mirror/art_method-inl.h"
+#include "art_method-inl.h"
#include "scoped_thread_state_change.h"
namespace art {
@@ -31,7 +31,7 @@ class ScopedFastNativeObjectAccess : public ScopedObjectAccessAlreadyRunnable {
SHARED_LOCK_FUNCTION(Locks::mutator_lock_) ALWAYS_INLINE
: ScopedObjectAccessAlreadyRunnable(env) {
Locks::mutator_lock_->AssertSharedHeld(Self());
- DCHECK(Self()->GetManagedStack()->GetTopQuickFrame()->AsMirrorPtr()->IsFastNative());
+ DCHECK((*Self()->GetManagedStack()->GetTopQuickFrame())->IsFastNative());
// Don't work with raw objects in non-runnable states.
DCHECK_EQ(Self()->GetState(), kRunnable);
}
diff --git a/runtime/native/sun_misc_Unsafe.cc b/runtime/native/sun_misc_Unsafe.cc
index 17ebdff..770644c 100644
--- a/runtime/native/sun_misc_Unsafe.cc
+++ b/runtime/native/sun_misc_Unsafe.cc
@@ -19,7 +19,7 @@
#include "gc/accounting/card_table-inl.h"
#include "jni_internal.h"
#include "mirror/array.h"
-#include "mirror/object.h"
+#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "scoped_fast_native_object_access.h"
diff --git a/runtime/native_bridge_art_interface.cc b/runtime/native_bridge_art_interface.cc
index 0ad560e..46cc5aa 100644
--- a/runtime/native_bridge_art_interface.cc
+++ b/runtime/native_bridge_art_interface.cc
@@ -20,10 +20,10 @@
#include "nativebridge/native_bridge.h"
+#include "art_method-inl.h"
#include "base/logging.h"
#include "base/macros.h"
#include "dex_file-inl.h"
-#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "scoped_thread_state_change.h"
#include "sigchain.h"
@@ -32,29 +32,24 @@ namespace art {
static const char* GetMethodShorty(JNIEnv* env, jmethodID mid) {
ScopedObjectAccess soa(env);
- mirror::ArtMethod* m = soa.DecodeMethod(mid);
+ ArtMethod* m = soa.DecodeMethod(mid);
return m->GetShorty();
}
static uint32_t GetNativeMethodCount(JNIEnv* env, jclass clazz) {
- if (clazz == nullptr)
+ if (clazz == nullptr) {
return 0;
+ }
ScopedObjectAccess soa(env);
mirror::Class* c = soa.Decode<mirror::Class*>(clazz);
uint32_t native_method_count = 0;
- for (uint32_t i = 0; i < c->NumDirectMethods(); ++i) {
- mirror::ArtMethod* m = c->GetDirectMethod(i);
- if (m->IsNative()) {
- native_method_count++;
- }
+ for (auto& m : c->GetDirectMethods(sizeof(void*))) {
+ native_method_count += m.IsNative() ? 1u : 0u;
}
- for (uint32_t i = 0; i < c->NumVirtualMethods(); ++i) {
- mirror::ArtMethod* m = c->GetVirtualMethod(i);
- if (m->IsNative()) {
- native_method_count++;
- }
+ for (auto& m : c->GetVirtualMethods(sizeof(void*))) {
+ native_method_count += m.IsNative() ? 1u : 0u;
}
return native_method_count;
}
@@ -68,29 +63,27 @@ static uint32_t GetNativeMethods(JNIEnv* env, jclass clazz, JNINativeMethod* met
mirror::Class* c = soa.Decode<mirror::Class*>(clazz);
uint32_t count = 0;
- for (uint32_t i = 0; i < c->NumDirectMethods(); ++i) {
- mirror::ArtMethod* m = c->GetDirectMethod(i);
- if (m->IsNative()) {
+ for (auto& m : c->GetDirectMethods(sizeof(void*))) {
+ if (m.IsNative()) {
if (count < method_count) {
- methods[count].name = m->GetName();
- methods[count].signature = m->GetShorty();
- methods[count].fnPtr = m->GetEntryPointFromJni();
+ methods[count].name = m.GetName();
+ methods[count].signature = m.GetShorty();
+ methods[count].fnPtr = m.GetEntryPointFromJni();
count++;
} else {
- LOG(WARNING) << "Output native method array too small. Skipping " << PrettyMethod(m);
+ LOG(WARNING) << "Output native method array too small. Skipping " << PrettyMethod(&m);
}
}
}
- for (uint32_t i = 0; i < c->NumVirtualMethods(); ++i) {
- mirror::ArtMethod* m = c->GetVirtualMethod(i);
- if (m->IsNative()) {
+ for (auto& m : c->GetVirtualMethods(sizeof(void*))) {
+ if (m.IsNative()) {
if (count < method_count) {
- methods[count].name = m->GetName();
- methods[count].signature = m->GetShorty();
- methods[count].fnPtr = m->GetEntryPointFromJni();
+ methods[count].name = m.GetName();
+ methods[count].signature = m.GetShorty();
+ methods[count].fnPtr = m.GetEntryPointFromJni();
count++;
} else {
- LOG(WARNING) << "Output native method array too small. Skipping " << PrettyMethod(m);
+ LOG(WARNING) << "Output native method array too small. Skipping " << PrettyMethod(&m);
}
}
}
diff --git a/runtime/nth_caller_visitor.h b/runtime/nth_caller_visitor.h
index d2d7fa8..7fe3130 100644
--- a/runtime/nth_caller_visitor.h
+++ b/runtime/nth_caller_visitor.h
@@ -17,8 +17,8 @@
#ifndef ART_RUNTIME_NTH_CALLER_VISITOR_H_
#define ART_RUNTIME_NTH_CALLER_VISITOR_H_
+#include "art_method.h"
#include "base/mutex.h"
-#include "mirror/art_method.h"
#include "stack.h"
namespace art {
@@ -34,7 +34,7 @@ struct NthCallerVisitor : public StackVisitor {
caller(nullptr) {}
bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod* m = GetMethod();
+ ArtMethod* m = GetMethod();
bool do_count = false;
if (m == nullptr || m->IsRuntimeMethod()) {
// Upcall.
@@ -56,7 +56,7 @@ struct NthCallerVisitor : public StackVisitor {
const size_t n;
const bool include_runtime_and_upcalls_;
size_t count;
- mirror::ArtMethod* caller;
+ ArtMethod* caller;
};
} // namespace art
diff --git a/runtime/oat_file-inl.h b/runtime/oat_file-inl.h
index a429c87..6b3b666 100644
--- a/runtime/oat_file-inl.h
+++ b/runtime/oat_file-inl.h
@@ -22,7 +22,7 @@
namespace art {
inline const OatQuickMethodHeader* OatFile::OatMethod::GetOatQuickMethodHeader() const {
- const void* code = mirror::ArtMethod::EntryPointToCodePointer(GetQuickCode());
+ const void* code = ArtMethod::EntryPointToCodePointer(GetQuickCode());
if (code == nullptr) {
return nullptr;
}
@@ -39,7 +39,7 @@ inline uint32_t OatFile::OatMethod::GetOatQuickMethodHeaderOffset() const {
}
inline uint32_t OatFile::OatMethod::GetQuickCodeSize() const {
- const void* code = mirror::ArtMethod::EntryPointToCodePointer(GetQuickCode());
+ const void* code = ArtMethod::EntryPointToCodePointer(GetQuickCode());
if (code == nullptr) {
return 0u;
}
@@ -55,7 +55,7 @@ inline uint32_t OatFile::OatMethod::GetQuickCodeSizeOffset() const {
}
inline size_t OatFile::OatMethod::GetFrameSizeInBytes() const {
- const void* code = mirror::ArtMethod::EntryPointToCodePointer(GetQuickCode());
+ const void* code = ArtMethod::EntryPointToCodePointer(GetQuickCode());
if (code == nullptr) {
return 0u;
}
@@ -63,7 +63,7 @@ inline size_t OatFile::OatMethod::GetFrameSizeInBytes() const {
}
inline uint32_t OatFile::OatMethod::GetCoreSpillMask() const {
- const void* code = mirror::ArtMethod::EntryPointToCodePointer(GetQuickCode());
+ const void* code = ArtMethod::EntryPointToCodePointer(GetQuickCode());
if (code == nullptr) {
return 0u;
}
@@ -71,7 +71,7 @@ inline uint32_t OatFile::OatMethod::GetCoreSpillMask() const {
}
inline uint32_t OatFile::OatMethod::GetFpSpillMask() const {
- const void* code = mirror::ArtMethod::EntryPointToCodePointer(GetQuickCode());
+ const void* code = ArtMethod::EntryPointToCodePointer(GetQuickCode());
if (code == nullptr) {
return 0u;
}
@@ -79,7 +79,7 @@ inline uint32_t OatFile::OatMethod::GetFpSpillMask() const {
}
const uint8_t* OatFile::OatMethod::GetGcMap() const {
- const void* code = mirror::ArtMethod::EntryPointToCodePointer(GetQuickCode());
+ const void* code = ArtMethod::EntryPointToCodePointer(GetQuickCode());
if (code == nullptr) {
return nullptr;
}
@@ -130,7 +130,7 @@ inline uint32_t OatFile::OatMethod::GetVmapTableOffsetOffset() const {
}
inline const uint8_t* OatFile::OatMethod::GetMappingTable() const {
- const void* code = mirror::ArtMethod::EntryPointToCodePointer(GetQuickCode());
+ const void* code = ArtMethod::EntryPointToCodePointer(GetQuickCode());
if (code == nullptr) {
return nullptr;
}
@@ -142,7 +142,7 @@ inline const uint8_t* OatFile::OatMethod::GetMappingTable() const {
}
inline const uint8_t* OatFile::OatMethod::GetVmapTable() const {
- const void* code = mirror::ArtMethod::EntryPointToCodePointer(GetQuickCode());
+ const void* code = ArtMethod::EntryPointToCodePointer(GetQuickCode());
if (code == nullptr) {
return nullptr;
}
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 63ee4b1..6fda790 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -28,14 +28,13 @@
#include "android/dlext.h"
#endif
+#include "art_method-inl.h"
#include "base/bit_vector.h"
#include "base/stl_util.h"
#include "base/unix_file/fd_file.h"
#include "elf_file.h"
#include "elf_utils.h"
#include "oat.h"
-#include "mirror/art_method.h"
-#include "mirror/art_method-inl.h"
#include "mirror/class.h"
#include "mirror/object-inl.h"
#include "os.h"
@@ -693,7 +692,7 @@ const OatFile::OatMethod OatFile::OatClass::GetOatMethod(uint32_t method_index)
return OatMethod(oat_file_->Begin(), 0);
}
-void OatFile::OatMethod::LinkMethod(mirror::ArtMethod* method) const {
+void OatFile::OatMethod::LinkMethod(ArtMethod* method) const {
CHECK(method != nullptr);
method->SetEntryPointFromQuickCompiledCode(GetQuickCode());
}
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index 12e9f6c..c58b029 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -98,7 +98,7 @@ class OatFile FINAL {
class OatMethod FINAL {
public:
- void LinkMethod(mirror::ArtMethod* method) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void LinkMethod(ArtMethod* method) const;
uint32_t GetCodeOffset() const {
return code_offset_;
diff --git a/runtime/object_lock.cc b/runtime/object_lock.cc
index 749fb5d..f7accc0 100644
--- a/runtime/object_lock.cc
+++ b/runtime/object_lock.cc
@@ -47,7 +47,6 @@ void ObjectLock<T>::NotifyAll() {
obj_->NotifyAll(self_);
}
-template class ObjectLock<mirror::ArtMethod>;
template class ObjectLock<mirror::Class>;
template class ObjectLock<mirror::Object>;
diff --git a/runtime/primitive.h b/runtime/primitive.h
index 0ac5f40..ca42c47 100644
--- a/runtime/primitive.h
+++ b/runtime/primitive.h
@@ -26,7 +26,6 @@ namespace art {
static constexpr size_t kObjectReferenceSize = 4;
-
constexpr size_t ComponentSizeShiftWidth(size_t component_size) {
return component_size == 1u ? 0u :
component_size == 2u ? 1u :
diff --git a/runtime/profiler.cc b/runtime/profiler.cc
index f9218a3..ab28a9a 100644
--- a/runtime/profiler.cc
+++ b/runtime/profiler.cc
@@ -22,6 +22,7 @@
#include <fstream>
+#include "art_method-inl.h"
#include "base/stl_util.h"
#include "base/time_utils.h"
#include "base/unix_file/fd_file.h"
@@ -30,7 +31,6 @@
#include "debugger.h"
#include "dex_file-inl.h"
#include "instrumentation.h"
-#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache.h"
#include "mirror/object_array-inl.h"
@@ -57,7 +57,7 @@ volatile bool BackgroundMethodSamplingProfiler::shutting_down_ = false;
// Walk through the method within depth of max_depth_ on the Java stack
class BoundedStackVisitor : public StackVisitor {
public:
- BoundedStackVisitor(std::vector<std::pair<mirror::ArtMethod*, uint32_t>>* stack,
+ BoundedStackVisitor(std::vector<std::pair<ArtMethod*, uint32_t>>* stack,
Thread* thread, uint32_t max_depth)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
: StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
@@ -66,7 +66,7 @@ class BoundedStackVisitor : public StackVisitor {
depth_(0) {}
bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod* m = GetMethod();
+ ArtMethod* m = GetMethod();
if (m->IsRuntimeMethod()) {
return true;
}
@@ -81,7 +81,7 @@ class BoundedStackVisitor : public StackVisitor {
}
private:
- std::vector<std::pair<mirror::ArtMethod*, uint32_t>>* stack_;
+ std::vector<std::pair<ArtMethod*, uint32_t>>* stack_;
const uint32_t max_depth_;
uint32_t depth_;
};
@@ -94,7 +94,7 @@ static void GetSample(Thread* thread, void* arg) SHARED_LOCKS_REQUIRED(Locks::mu
const ProfilerOptions profile_options = profiler->GetProfilerOptions();
switch (profile_options.GetProfileType()) {
case kProfilerMethod: {
- mirror::ArtMethod* method = thread->GetCurrentMethod(nullptr);
+ ArtMethod* method = thread->GetCurrentMethod(nullptr);
if ((false) && method == nullptr) {
LOG(INFO) << "No current method available";
std::ostringstream os;
@@ -400,7 +400,7 @@ BackgroundMethodSamplingProfiler::BackgroundMethodSamplingProfiler(
// Filter out methods the profiler doesn't want to record.
// We require mutator lock since some statistics will be updated here.
-bool BackgroundMethodSamplingProfiler::ProcessMethod(mirror::ArtMethod* method) {
+bool BackgroundMethodSamplingProfiler::ProcessMethod(ArtMethod* method) {
if (method == nullptr) {
profile_table_.NullMethod();
// Don't record a null method.
@@ -435,7 +435,7 @@ bool BackgroundMethodSamplingProfiler::ProcessMethod(mirror::ArtMethod* method)
// A method has been hit, record its invocation in the method map.
// The mutator_lock must be held (shared) when this is called.
-void BackgroundMethodSamplingProfiler::RecordMethod(mirror::ArtMethod* method) {
+void BackgroundMethodSamplingProfiler::RecordMethod(ArtMethod* method) {
// Add to the profile table unless it is filtered out.
if (ProcessMethod(method)) {
profile_table_.Put(method);
@@ -448,7 +448,7 @@ void BackgroundMethodSamplingProfiler::RecordStack(const std::vector<Instruction
return;
}
// Get the method on top of the stack. We use this method to perform filtering.
- mirror::ArtMethod* method = stack.front().first;
+ ArtMethod* method = stack.front().first;
if (ProcessMethod(method)) {
profile_table_.PutStack(stack);
}
@@ -464,7 +464,7 @@ uint32_t BackgroundMethodSamplingProfiler::DumpProfile(std::ostream& os) {
}
// Profile Table.
-// This holds a mapping of mirror::ArtMethod* to a count of how many times a sample
+// This holds a mapping of ArtMethod* to a count of how many times a sample
// hit it at the top of the stack.
ProfileSampleResults::ProfileSampleResults(Mutex& lock) : lock_(lock), num_samples_(0),
num_null_methods_(0),
@@ -482,7 +482,7 @@ ProfileSampleResults::~ProfileSampleResults() {
// Add a method to the profile table. If it's the first time the method
// has been seen, add it with count=1, otherwise increment the count.
-void ProfileSampleResults::Put(mirror::ArtMethod* method) {
+void ProfileSampleResults::Put(ArtMethod* method) {
MutexLock mu(Thread::Current(), lock_);
uint32_t index = Hash(method);
if (table[index] == nullptr) {
@@ -517,7 +517,7 @@ void ProfileSampleResults::PutStack(const std::vector<InstructionLocation>& stac
for (std::vector<InstructionLocation>::const_reverse_iterator iter = stack.rbegin();
iter != stack.rend(); ++iter) {
InstructionLocation inst_loc = *iter;
- mirror::ArtMethod* method = inst_loc.first;
+ ArtMethod* method = inst_loc.first;
if (method == nullptr) {
// skip null method
continue;
@@ -577,7 +577,7 @@ uint32_t ProfileSampleResults::Write(std::ostream& os, ProfileDataType type) {
Map *map = table[i];
if (map != nullptr) {
for (const auto &meth_iter : *map) {
- mirror::ArtMethod *method = meth_iter.first;
+ ArtMethod *method = meth_iter.first;
std::string method_name = PrettyMethod(method);
const DexFile::CodeItem* codeitem = method->GetCodeItem();
@@ -709,7 +709,7 @@ void ProfileSampleResults::Clear() {
previous_.clear();
}
-uint32_t ProfileSampleResults::Hash(mirror::ArtMethod* method) {
+uint32_t ProfileSampleResults::Hash(ArtMethod* method) {
return (PointerToLowMemUInt32(method) >> 3) % kHashSize;
}
diff --git a/runtime/profiler.h b/runtime/profiler.h
index ae51c87..7611487 100644
--- a/runtime/profiler.h
+++ b/runtime/profiler.h
@@ -36,12 +36,12 @@
namespace art {
namespace mirror {
- class ArtMethod;
class Class;
} // namespace mirror
+class ArtMethod;
class Thread;
-typedef std::pair<mirror::ArtMethod*, uint32_t> InstructionLocation;
+typedef std::pair<ArtMethod*, uint32_t> InstructionLocation;
// This class stores the sampled bounded stacks in a trie structure. A path of the trie represents
// a particular context with the method on top of the stack being a leaf or an internal node of the
@@ -104,7 +104,7 @@ class ProfileSampleResults {
explicit ProfileSampleResults(Mutex& lock);
~ProfileSampleResults();
- void Put(mirror::ArtMethod* method);
+ void Put(ArtMethod* method);
void PutStack(const std::vector<InstructionLocation>& stack_dump);
uint32_t Write(std::ostream &os, ProfileDataType type);
void ReadPrevious(int fd, ProfileDataType type);
@@ -114,14 +114,14 @@ class ProfileSampleResults {
void BootMethod() { ++num_boot_methods_; }
private:
- uint32_t Hash(mirror::ArtMethod* method);
+ uint32_t Hash(ArtMethod* method);
static constexpr int kHashSize = 17;
Mutex& lock_; // Reference to the main profiler lock - we don't need two of them.
uint32_t num_samples_; // Total number of samples taken.
uint32_t num_null_methods_; // Number of samples where can don't know the method.
uint32_t num_boot_methods_; // Number of samples in the boot path.
- typedef std::map<mirror::ArtMethod*, uint32_t> Map; // Map of method vs its count.
+ typedef std::map<ArtMethod*, uint32_t> Map; // Map of method vs its count.
Map *table[kHashSize];
typedef std::set<StackTrieNode*> TrieNodeSet;
@@ -176,9 +176,9 @@ class BackgroundMethodSamplingProfiler {
static void Stop() LOCKS_EXCLUDED(Locks::profiler_lock_, wait_lock_);
static void Shutdown() LOCKS_EXCLUDED(Locks::profiler_lock_);
- void RecordMethod(mirror::ArtMethod *method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void RecordMethod(ArtMethod *method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void RecordStack(const std::vector<InstructionLocation>& stack) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool ProcessMethod(mirror::ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool ProcessMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
const ProfilerOptions& GetProfilerOptions() const { return options_; }
Barrier& GetBarrier() {
diff --git a/runtime/proxy_test.cc b/runtime/proxy_test.cc
index 93d1f66..f40c0f1 100644
--- a/runtime/proxy_test.cc
+++ b/runtime/proxy_test.cc
@@ -52,8 +52,7 @@ class ProxyTest : public CommonCompilerTest {
// Builds the method array.
jsize methods_count = 3; // Object.equals, Object.hashCode and Object.toString.
for (mirror::Class* interface : interfaces) {
- mirror::ObjectArray<mirror::ArtMethod>* virtual_methods = interface->GetVirtualMethods();
- methods_count += (virtual_methods == nullptr) ? 0 : virtual_methods->GetLength();
+ methods_count += interface->NumVirtualMethods();
}
jobjectArray proxyClassMethods = soa.Env()->NewObjectArray(
methods_count, soa.AddLocalReference<jclass>(mirror::Method::StaticClass()), nullptr);
@@ -61,28 +60,29 @@ class ProxyTest : public CommonCompilerTest {
jsize array_index = 0;
// Fill the method array
- mirror::ArtMethod* method = javaLangObject->FindDeclaredVirtualMethod(
- "equals", "(Ljava/lang/Object;)Z");
+ ArtMethod* method = javaLangObject->FindDeclaredVirtualMethod(
+ "equals", "(Ljava/lang/Object;)Z", sizeof(void*));
CHECK(method != nullptr);
soa.Env()->SetObjectArrayElement(
proxyClassMethods, array_index++, soa.AddLocalReference<jobject>(
mirror::Method::CreateFromArtMethod(soa.Self(), method)));
- method = javaLangObject->FindDeclaredVirtualMethod("hashCode", "()I");
+ method = javaLangObject->FindDeclaredVirtualMethod("hashCode", "()I", sizeof(void*));
CHECK(method != nullptr);
soa.Env()->SetObjectArrayElement(
proxyClassMethods, array_index++, soa.AddLocalReference<jobject>(
mirror::Method::CreateFromArtMethod(soa.Self(), method)));
- method = javaLangObject->FindDeclaredVirtualMethod("toString", "()Ljava/lang/String;");
+ method = javaLangObject->FindDeclaredVirtualMethod(
+ "toString", "()Ljava/lang/String;", sizeof(void*));
CHECK(method != nullptr);
soa.Env()->SetObjectArrayElement(
proxyClassMethods, array_index++, soa.AddLocalReference<jobject>(
mirror::Method::CreateFromArtMethod(soa.Self(), method)));
// Now adds all interfaces virtual methods.
for (mirror::Class* interface : interfaces) {
- for (int32_t i = 0, count = interface->NumVirtualMethods(); i < count; ++i) {
+ for (auto& m : interface->GetVirtualMethods(sizeof(void*))) {
soa.Env()->SetObjectArrayElement(
proxyClassMethods, array_index++, soa.AddLocalReference<jobject>(
- mirror::Method::CreateFromArtMethod(soa.Self(), interface->GetVirtualMethod(i))));
+ mirror::Method::CreateFromArtMethod(soa.Self(), &m)));
}
}
CHECK_EQ(array_index, methods_count);
diff --git a/runtime/quick/inline_method_analyser.cc b/runtime/quick/inline_method_analyser.cc
index 1c404ff..99e262e 100644
--- a/runtime/quick/inline_method_analyser.cc
+++ b/runtime/quick/inline_method_analyser.cc
@@ -17,11 +17,11 @@
#include "inline_method_analyser.h"
#include "art_field-inl.h"
+#include "art_method-inl.h"
#include "class_linker-inl.h"
#include "dex_file-inl.h"
#include "dex_instruction.h"
#include "dex_instruction-inl.h"
-#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache-inl.h"
#include "verifier/method_verifier-inl.h"
@@ -330,8 +330,9 @@ bool InlineMethodAnalyser::ComputeSpecialAccessorInfo(uint32_t field_idx, bool i
InlineIGetIPutData* result) {
mirror::DexCache* dex_cache = verifier->GetDexCache();
uint32_t method_idx = verifier->GetMethodReference().dex_method_index;
- mirror::ArtMethod* method = dex_cache->GetResolvedMethod(method_idx);
- ArtField* field = Runtime::Current()->GetClassLinker()->GetResolvedField(field_idx, dex_cache);
+ auto* cl = Runtime::Current()->GetClassLinker();
+ ArtMethod* method = dex_cache->GetResolvedMethod(method_idx, cl->GetImagePointerSize());
+ ArtField* field = cl->GetResolvedField(field_idx, dex_cache);
if (method == nullptr || field == nullptr || field->IsStatic()) {
return false;
}
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 730759a..8c9782a 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -17,11 +17,11 @@
#include "quick_exception_handler.h"
#include "arch/context.h"
+#include "art_method-inl.h"
#include "dex_instruction.h"
#include "entrypoints/entrypoint_utils.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "handle_scope-inl.h"
-#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/throwable.h"
@@ -53,14 +53,14 @@ class CatchBlockStackVisitor FINAL : public StackVisitor {
}
bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod* method = GetMethod();
+ ArtMethod* method = GetMethod();
exception_handler_->SetHandlerFrameDepth(GetFrameDepth());
if (method == nullptr) {
// This is the upcall, we remember the frame and last pc so that we may long jump to them.
exception_handler_->SetHandlerQuickFramePc(GetCurrentQuickFramePc());
exception_handler_->SetHandlerQuickFrame(GetCurrentQuickFrame());
uint32_t next_dex_pc;
- mirror::ArtMethod* next_art_method;
+ ArtMethod* next_art_method;
bool has_next = GetNextMethodAndDexPc(&next_art_method, &next_dex_pc);
// Report the method that did the down call as the handler.
exception_handler_->SetHandlerDexPc(next_dex_pc);
@@ -78,12 +78,11 @@ class CatchBlockStackVisitor FINAL : public StackVisitor {
DCHECK(method->IsCalleeSaveMethod());
return true;
}
- StackHandleScope<1> hs(self_);
- return HandleTryItems(hs.NewHandle(method));
+ return HandleTryItems(method);
}
private:
- bool HandleTryItems(Handle<mirror::ArtMethod> method)
+ bool HandleTryItems(ArtMethod* method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
uint32_t dex_pc = DexFile::kDexNoIndex;
if (!method->IsNative()) {
@@ -91,13 +90,12 @@ class CatchBlockStackVisitor FINAL : public StackVisitor {
}
if (dex_pc != DexFile::kDexNoIndex) {
bool clear_exception = false;
- StackHandleScope<1> hs(Thread::Current());
+ StackHandleScope<1> hs(self_);
Handle<mirror::Class> to_find(hs.NewHandle((*exception_)->GetClass()));
- uint32_t found_dex_pc = mirror::ArtMethod::FindCatchBlock(method, to_find, dex_pc,
- &clear_exception);
+ uint32_t found_dex_pc = method->FindCatchBlock(to_find, dex_pc, &clear_exception);
exception_handler_->SetClearException(clear_exception);
if (found_dex_pc != DexFile::kDexNoIndex) {
- exception_handler_->SetHandlerMethod(method.Get());
+ exception_handler_->SetHandlerMethod(method);
exception_handler_->SetHandlerDexPc(found_dex_pc);
exception_handler_->SetHandlerQuickFramePc(method->ToNativeQuickPc(found_dex_pc));
exception_handler_->SetHandlerQuickFrame(GetCurrentQuickFrame());
@@ -132,7 +130,7 @@ void QuickExceptionHandler::FindCatch(mirror::Throwable* exception) {
visitor.WalkStack(true);
if (kDebugExceptionDelivery) {
- if (handler_quick_frame_->AsMirrorPtr() == nullptr) {
+ if (*handler_quick_frame_ == nullptr) {
LOG(INFO) << "Handler is upcall";
}
if (handler_method_ != nullptr) {
@@ -171,7 +169,7 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor {
bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
exception_handler_->SetHandlerFrameDepth(GetFrameDepth());
- mirror::ArtMethod* method = GetMethod();
+ ArtMethod* method = GetMethod();
if (method == nullptr) {
// This is the upcall, we remember the frame and last pc so that we may long jump to them.
exception_handler_->SetHandlerQuickFramePc(GetCurrentQuickFramePc());
@@ -191,23 +189,21 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor {
return static_cast<VRegKind>(kinds.at(reg * 2));
}
- bool HandleDeoptimization(mirror::ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool HandleDeoptimization(ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
const DexFile::CodeItem* code_item = m->GetCodeItem();
CHECK(code_item != nullptr);
uint16_t num_regs = code_item->registers_size_;
uint32_t dex_pc = GetDexPc();
- StackHandleScope<3> hs(self_); // Dex cache, class loader and method.
+ StackHandleScope<2> hs(self_); // Dex cache, class loader and method.
mirror::Class* declaring_class = m->GetDeclaringClass();
Handle<mirror::DexCache> h_dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
- Handle<mirror::ArtMethod> h_method(hs.NewHandle(m));
verifier::MethodVerifier verifier(self_, h_dex_cache->GetDexFile(), h_dex_cache, h_class_loader,
&m->GetClassDef(), code_item, m->GetDexMethodIndex(),
- h_method, m->GetAccessFlags(), true, true, true, true);
+ m, m->GetAccessFlags(), true, true, true, true);
bool verifier_success = verifier.Verify();
- CHECK(verifier_success) << PrettyMethod(h_method.Get());
- ShadowFrame* new_frame = ShadowFrame::CreateDeoptimizedFrame(
- num_regs, nullptr, h_method.Get(), dex_pc);
+ CHECK(verifier_success) << PrettyMethod(m);
+ ShadowFrame* new_frame = ShadowFrame::CreateDeoptimizedFrame(num_regs, nullptr, m, dex_pc);
self_->SetShadowFrameUnderConstruction(new_frame);
const std::vector<int32_t> kinds(verifier.DescribeVRegs(dex_pc));
@@ -230,7 +226,7 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor {
// Check IsReferenceVReg in case the compiled GC map doesn't agree with the verifier.
// We don't want to copy a stale reference into the shadow frame as a reference.
// b/20736048
- if (GetVReg(h_method.Get(), reg, kind, &value) && IsReferenceVReg(h_method.Get(), reg)) {
+ if (GetVReg(m, reg, kind, &value) && IsReferenceVReg(m, reg)) {
new_frame->SetVRegReference(reg, reinterpret_cast<mirror::Object*>(value));
} else {
new_frame->SetVReg(reg, kDeadValue);
@@ -241,14 +237,14 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor {
if (GetVRegKind(reg + 1, kinds) == kLongHiVReg) {
// Treat it as a "long" register pair.
uint64_t value = 0;
- if (GetVRegPair(h_method.Get(), reg, kLongLoVReg, kLongHiVReg, &value)) {
+ if (GetVRegPair(m, reg, kLongLoVReg, kLongHiVReg, &value)) {
new_frame->SetVRegLong(reg, value);
} else {
new_frame->SetVRegLong(reg, kLongDeadValue);
}
} else {
uint32_t value = 0;
- if (GetVReg(h_method.Get(), reg, kind, &value)) {
+ if (GetVReg(m, reg, kind, &value)) {
new_frame->SetVReg(reg, value);
} else {
new_frame->SetVReg(reg, kDeadValue);
@@ -260,7 +256,7 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor {
// Nothing to do: we treated it as a "long" register pair.
} else {
uint32_t value = 0;
- if (GetVReg(h_method.Get(), reg, kind, &value)) {
+ if (GetVReg(m, reg, kind, &value)) {
new_frame->SetVReg(reg, value);
} else {
new_frame->SetVReg(reg, kDeadValue);
@@ -270,7 +266,7 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor {
case kDoubleLoVReg:
if (GetVRegKind(reg + 1, kinds) == kDoubleHiVReg) {
uint64_t value = 0;
- if (GetVRegPair(h_method.Get(), reg, kDoubleLoVReg, kDoubleHiVReg, &value)) {
+ if (GetVRegPair(m, reg, kDoubleLoVReg, kDoubleHiVReg, &value)) {
// Treat it as a "double" register pair.
new_frame->SetVRegLong(reg, value);
} else {
@@ -278,7 +274,7 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor {
}
} else {
uint32_t value = 0;
- if (GetVReg(h_method.Get(), reg, kind, &value)) {
+ if (GetVReg(m, reg, kind, &value)) {
new_frame->SetVReg(reg, value);
} else {
new_frame->SetVReg(reg, kDeadValue);
@@ -290,7 +286,7 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor {
// Nothing to do: we treated it as a "double" register pair.
} else {
uint32_t value = 0;
- if (GetVReg(h_method.Get(), reg, kind, &value)) {
+ if (GetVReg(m, reg, kind, &value)) {
new_frame->SetVReg(reg, value);
} else {
new_frame->SetVReg(reg, kDeadValue);
@@ -299,7 +295,7 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor {
break;
default:
uint32_t value = 0;
- if (GetVReg(h_method.Get(), reg, kind, &value)) {
+ if (GetVReg(m, reg, kind, &value)) {
new_frame->SetVReg(reg, value);
} else {
new_frame->SetVReg(reg, kDeadValue);
diff --git a/runtime/quick_exception_handler.h b/runtime/quick_exception_handler.h
index 7ee4118..8d7cd12 100644
--- a/runtime/quick_exception_handler.h
+++ b/runtime/quick_exception_handler.h
@@ -25,9 +25,9 @@
namespace art {
namespace mirror {
-class ArtMethod;
class Throwable;
} // namespace mirror
+class ArtMethod;
class Context;
class Thread;
class ShadowFrame;
@@ -48,7 +48,7 @@ class QuickExceptionHandler {
void UpdateInstrumentationStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
NO_RETURN void DoLongJump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SetHandlerQuickFrame(StackReference<mirror::ArtMethod>* handler_quick_frame) {
+ void SetHandlerQuickFrame(ArtMethod** handler_quick_frame) {
handler_quick_frame_ = handler_quick_frame;
}
@@ -56,11 +56,11 @@ class QuickExceptionHandler {
handler_quick_frame_pc_ = handler_quick_frame_pc;
}
- mirror::ArtMethod* GetHandlerMethod() const {
+ ArtMethod* GetHandlerMethod() const {
return handler_method_;
}
- void SetHandlerMethod(mirror::ArtMethod* handler_quick_method) {
+ void SetHandlerMethod(ArtMethod* handler_quick_method) {
handler_method_ = handler_quick_method;
}
@@ -87,11 +87,11 @@ class QuickExceptionHandler {
// Is method tracing active?
const bool method_tracing_active_;
// Quick frame with found handler or last frame if no handler found.
- StackReference<mirror::ArtMethod>* handler_quick_frame_;
+ ArtMethod** handler_quick_frame_;
// PC to branch to for the handler.
uintptr_t handler_quick_frame_pc_;
// The handler method to report to the debugger.
- mirror::ArtMethod* handler_method_;
+ ArtMethod* handler_method_;
// The handler's dex PC, zero implies an uncaught exception.
uint32_t handler_dex_pc_;
// Should the exception be cleared as the catch block has no move-exception?
diff --git a/runtime/read_barrier.h b/runtime/read_barrier.h
index 52d83a2..aa72e97 100644
--- a/runtime/read_barrier.h
+++ b/runtime/read_barrier.h
@@ -28,12 +28,11 @@
// which needs to be a C header file for asm_support.h.
namespace art {
-
namespace mirror {
- class ArtMethod;
class Object;
template<typename MirrorType> class HeapReference;
} // namespace mirror
+class ArtMethod;
class ReadBarrier {
public:
diff --git a/runtime/reference_table_test.cc b/runtime/reference_table_test.cc
index 4ffebf2..fae8e72 100644
--- a/runtime/reference_table_test.cc
+++ b/runtime/reference_table_test.cc
@@ -18,7 +18,9 @@
#include "common_runtime_test.h"
#include "mirror/array-inl.h"
+#include "mirror/class-inl.h"
#include "mirror/string.h"
+#include "primitive.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index f8c7081..11522d9 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -17,6 +17,7 @@
#include "reflection-inl.h"
#include "art_field-inl.h"
+#include "art_method-inl.h"
#include "class_linker.h"
#include "common_throws.h"
#include "dex_file-inl.h"
@@ -24,7 +25,6 @@
#include "indirect_reference_table-inl.h"
#include "jni_internal.h"
#include "mirror/abstract_method.h"
-#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object_array-inl.h"
#include "nth_caller_visitor.h"
@@ -213,10 +213,9 @@ class ArgArray {
}
bool BuildArgArrayFromObjectArray(mirror::Object* receiver,
- mirror::ObjectArray<mirror::Object>* args,
- Handle<mirror::ArtMethod> h_m)
+ mirror::ObjectArray<mirror::Object>* args, ArtMethod* m)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- const DexFile::TypeList* classes = h_m->GetParameterTypeList();
+ const DexFile::TypeList* classes = m->GetParameterTypeList();
// Set receiver if non-null (method is not static)
if (receiver != nullptr) {
Append(receiver);
@@ -225,11 +224,11 @@ class ArgArray {
mirror::Object* arg = args->Get(args_offset);
if (((shorty_[i] == 'L') && (arg != nullptr)) || ((arg == nullptr && shorty_[i] != 'L'))) {
mirror::Class* dst_class =
- h_m->GetClassFromTypeIndex(classes->GetTypeItem(args_offset).type_idx_, true);
+ m->GetClassFromTypeIndex(classes->GetTypeItem(args_offset).type_idx_, true);
if (UNLIKELY(arg == nullptr || !arg->InstanceOf(dst_class))) {
ThrowIllegalArgumentException(
StringPrintf("method %s argument %zd has type %s, got %s",
- PrettyMethod(h_m.Get(), false).c_str(),
+ PrettyMethod(m, false).c_str(),
args_offset + 1, // Humans don't count from 0.
PrettyDescriptor(dst_class).c_str(),
PrettyTypeOf(arg).c_str()).c_str());
@@ -257,7 +256,7 @@ class ArgArray {
} else { \
ThrowIllegalArgumentException(\
StringPrintf("method %s argument %zd has type %s, got %s", \
- PrettyMethod(h_m.Get(), false).c_str(), \
+ PrettyMethod(m, false).c_str(), \
args_offset + 1, \
expected, \
PrettyTypeOf(arg).c_str()).c_str()); \
@@ -343,7 +342,7 @@ class ArgArray {
std::unique_ptr<uint32_t[]> large_arg_array_;
};
-static void CheckMethodArguments(JavaVMExt* vm, mirror::ArtMethod* m, uint32_t* args)
+static void CheckMethodArguments(JavaVMExt* vm, ArtMethod* m, uint32_t* args)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
const DexFile::TypeList* params = m->GetParameterTypeList();
if (params == nullptr) {
@@ -356,16 +355,14 @@ static void CheckMethodArguments(JavaVMExt* vm, mirror::ArtMethod* m, uint32_t*
offset = 1;
}
// TODO: If args contain object references, it may cause problems.
- Thread* self = Thread::Current();
- StackHandleScope<1> hs(self);
- Handle<mirror::ArtMethod> h_m(hs.NewHandle(m));
+ Thread* const self = Thread::Current();
for (uint32_t i = 0; i < num_params; i++) {
uint16_t type_idx = params->GetTypeItem(i).type_idx_;
- mirror::Class* param_type = h_m->GetClassFromTypeIndex(type_idx, true);
+ mirror::Class* param_type = m->GetClassFromTypeIndex(type_idx, true);
if (param_type == nullptr) {
CHECK(self->IsExceptionPending());
LOG(ERROR) << "Internal error: unresolvable type for argument type in JNI invoke: "
- << h_m->GetTypeDescriptorFromTypeIdx(type_idx) << "\n"
+ << m->GetTypeDescriptorFromTypeIdx(type_idx) << "\n"
<< self->GetException()->Dump();
self->ClearException();
++error_count;
@@ -378,7 +375,7 @@ static void CheckMethodArguments(JavaVMExt* vm, mirror::ArtMethod* m, uint32_t*
if (argument != nullptr && !argument->InstanceOf(param_type)) {
LOG(ERROR) << "JNI ERROR (app bug): attempt to pass an instance of "
<< PrettyTypeOf(argument) << " as argument " << (i + 1)
- << " to " << PrettyMethod(h_m.Get());
+ << " to " << PrettyMethod(m);
++error_count;
}
} else if (param_type->IsPrimitiveLong() || param_type->IsPrimitiveDouble()) {
@@ -388,25 +385,25 @@ static void CheckMethodArguments(JavaVMExt* vm, mirror::ArtMethod* m, uint32_t*
if (param_type->IsPrimitiveBoolean()) {
if (arg != JNI_TRUE && arg != JNI_FALSE) {
LOG(ERROR) << "JNI ERROR (app bug): expected jboolean (0/1) but got value of "
- << arg << " as argument " << (i + 1) << " to " << PrettyMethod(h_m.Get());
+ << arg << " as argument " << (i + 1) << " to " << PrettyMethod(m);
++error_count;
}
} else if (param_type->IsPrimitiveByte()) {
if (arg < -128 || arg > 127) {
LOG(ERROR) << "JNI ERROR (app bug): expected jbyte but got value of "
- << arg << " as argument " << (i + 1) << " to " << PrettyMethod(h_m.Get());
+ << arg << " as argument " << (i + 1) << " to " << PrettyMethod(m);
++error_count;
}
} else if (param_type->IsPrimitiveChar()) {
if (args[i + offset] > 0xFFFF) {
LOG(ERROR) << "JNI ERROR (app bug): expected jchar but got value of "
- << arg << " as argument " << (i + 1) << " to " << PrettyMethod(h_m.Get());
+ << arg << " as argument " << (i + 1) << " to " << PrettyMethod(m);
++error_count;
}
} else if (param_type->IsPrimitiveShort()) {
if (arg < -32768 || arg > 0x7FFF) {
LOG(ERROR) << "JNI ERROR (app bug): expected jshort but got value of "
- << arg << " as argument " << (i + 1) << " to " << PrettyMethod(h_m.Get());
+ << arg << " as argument " << (i + 1) << " to " << PrettyMethod(m);
++error_count;
}
}
@@ -416,24 +413,23 @@ static void CheckMethodArguments(JavaVMExt* vm, mirror::ArtMethod* m, uint32_t*
// TODO: pass the JNI function name (such as "CallVoidMethodV") through so we can call JniAbort
// with an argument.
vm->JniAbortF(nullptr, "bad arguments passed to %s (see above for details)",
- PrettyMethod(h_m.Get()).c_str());
+ PrettyMethod(m).c_str());
}
}
-static mirror::ArtMethod* FindVirtualMethod(mirror::Object* receiver,
- mirror::ArtMethod* method)
+static ArtMethod* FindVirtualMethod(mirror::Object* receiver, ArtMethod* method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(method);
+ return receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(method, sizeof(void*));
}
static void InvokeWithArgArray(const ScopedObjectAccessAlreadyRunnable& soa,
- mirror::ArtMethod* method, ArgArray* arg_array, JValue* result,
+ ArtMethod* method, ArgArray* arg_array, JValue* result,
const char* shorty)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
uint32_t* args = arg_array->GetArray();
if (UNLIKELY(soa.Env()->check_jni)) {
- CheckMethodArguments(soa.Vm(), method, args);
+ CheckMethodArguments(soa.Vm(), method->GetInterfaceMethodIfProxy(sizeof(void*)), args);
}
method->Invoke(soa.Self(), args, arg_array->GetNumBytes(), result, shorty);
}
@@ -449,7 +445,7 @@ JValue InvokeWithVarArgs(const ScopedObjectAccessAlreadyRunnable& soa, jobject o
return JValue();
}
- mirror::ArtMethod* method = soa.DecodeMethod(mid);
+ ArtMethod* method = soa.DecodeMethod(mid);
bool is_string_init = method->GetDeclaringClass()->IsStringClass() && method->IsConstructor();
if (is_string_init) {
// Replace calls to String.<init> with equivalent StringFactory call.
@@ -479,7 +475,7 @@ JValue InvokeWithJValues(const ScopedObjectAccessAlreadyRunnable& soa, jobject o
return JValue();
}
- mirror::ArtMethod* method = soa.DecodeMethod(mid);
+ ArtMethod* method = soa.DecodeMethod(mid);
bool is_string_init = method->GetDeclaringClass()->IsStringClass() && method->IsConstructor();
if (is_string_init) {
// Replace calls to String.<init> with equivalent StringFactory call.
@@ -510,7 +506,7 @@ JValue InvokeVirtualOrInterfaceWithJValues(const ScopedObjectAccessAlreadyRunnab
}
mirror::Object* receiver = soa.Decode<mirror::Object*>(obj);
- mirror::ArtMethod* method = FindVirtualMethod(receiver, soa.DecodeMethod(mid));
+ ArtMethod* method = FindVirtualMethod(receiver, soa.DecodeMethod(mid));
bool is_string_init = method->GetDeclaringClass()->IsStringClass() && method->IsConstructor();
if (is_string_init) {
// Replace calls to String.<init> with equivalent StringFactory call.
@@ -541,7 +537,7 @@ JValue InvokeVirtualOrInterfaceWithVarArgs(const ScopedObjectAccessAlreadyRunnab
}
mirror::Object* receiver = soa.Decode<mirror::Object*>(obj);
- mirror::ArtMethod* method = FindVirtualMethod(receiver, soa.DecodeMethod(mid));
+ ArtMethod* method = FindVirtualMethod(receiver, soa.DecodeMethod(mid));
bool is_string_init = method->GetDeclaringClass()->IsStringClass() && method->IsConstructor();
if (is_string_init) {
// Replace calls to String.<init> with equivalent StringFactory call.
@@ -574,7 +570,7 @@ jobject InvokeMethod(const ScopedObjectAccessAlreadyRunnable& soa, jobject javaM
auto* abstract_method = soa.Decode<mirror::AbstractMethod*>(javaMethod);
const bool accessible = abstract_method->IsAccessible();
- mirror::ArtMethod* m = abstract_method->GetArtMethod();
+ ArtMethod* m = abstract_method->GetArtMethod();
mirror::Class* declaring_class = m->GetDeclaringClass();
if (UNLIKELY(!declaring_class->IsInitialized())) {
@@ -601,13 +597,14 @@ jobject InvokeMethod(const ScopedObjectAccessAlreadyRunnable& soa, jobject javaM
}
// Find the actual implementation of the virtual method.
- m = receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(m);
+ m = receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(m, sizeof(void*));
}
}
// Get our arrays of arguments and their types, and check they're the same size.
auto* objects = soa.Decode<mirror::ObjectArray<mirror::Object>*>(javaArgs);
- const DexFile::TypeList* classes = m->GetParameterTypeList();
+ auto* np_method = m->GetInterfaceMethodIfProxy(sizeof(void*));
+ const DexFile::TypeList* classes = np_method->GetParameterTypeList();
uint32_t classes_size = (classes == nullptr) ? 0 : classes->Size();
uint32_t arg_count = (objects != nullptr) ? objects->GetLength() : 0;
if (arg_count != classes_size) {
@@ -633,11 +630,9 @@ jobject InvokeMethod(const ScopedObjectAccessAlreadyRunnable& soa, jobject javaM
// Invoke the method.
JValue result;
uint32_t shorty_len = 0;
- const char* shorty = m->GetShorty(&shorty_len);
+ const char* shorty = np_method->GetShorty(&shorty_len);
ArgArray arg_array(shorty, shorty_len);
- StackHandleScope<1> hs(soa.Self());
- Handle<mirror::ArtMethod> h_m(hs.NewHandle(m));
- if (!arg_array.BuildArgArrayFromObjectArray(receiver, objects, h_m)) {
+ if (!arg_array.BuildArgArrayFromObjectArray(receiver, objects, np_method)) {
CHECK(soa.Self()->IsExceptionPending());
return nullptr;
}
diff --git a/runtime/reflection.h b/runtime/reflection.h
index df3b9d3..825a721 100644
--- a/runtime/reflection.h
+++ b/runtime/reflection.h
@@ -23,11 +23,11 @@
namespace art {
namespace mirror {
- class ArtMethod;
class Class;
class Object;
} // namespace mirror
class ArtField;
+class ArtMethod;
union JValue;
class ScopedObjectAccessAlreadyRunnable;
class ShadowFrame;
diff --git a/runtime/reflection_test.cc b/runtime/reflection_test.cc
index 36e444a..6f17e7d 100644
--- a/runtime/reflection_test.cc
+++ b/runtime/reflection_test.cc
@@ -20,8 +20,8 @@
#include <limits.h>
#include "ScopedLocalRef.h"
+#include "art_method-inl.h"
#include "common_compiler_test.h"
-#include "mirror/art_method-inl.h"
#include "scoped_thread_state_change.h"
namespace art {
@@ -81,7 +81,7 @@ class ReflectionTest : public CommonCompilerTest {
return soa.AddLocalReference<jclass>(c);
}
- void ReflectionTestMakeExecutable(mirror::ArtMethod** method,
+ void ReflectionTestMakeExecutable(ArtMethod** method,
mirror::Object** receiver,
bool is_static, const char* method_name,
const char* method_signature)
@@ -107,8 +107,8 @@ class ReflectionTest : public CommonCompilerTest {
class_loader);
CHECK(c != nullptr);
- *method = is_static ? c->FindDirectMethod(method_name, method_signature)
- : c->FindVirtualMethod(method_name, method_signature);
+ *method = is_static ? c->FindDirectMethod(method_name, method_signature, sizeof(void*))
+ : c->FindVirtualMethod(method_name, method_signature, sizeof(void*));
CHECK(method != nullptr);
if (is_static) {
@@ -130,7 +130,7 @@ class ReflectionTest : public CommonCompilerTest {
void InvokeNopMethod(bool is_static) {
ScopedObjectAccess soa(env_);
- mirror::ArtMethod* method;
+ ArtMethod* method;
mirror::Object* receiver;
ReflectionTestMakeExecutable(&method, &receiver, is_static, "nop", "()V");
ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
@@ -139,7 +139,7 @@ class ReflectionTest : public CommonCompilerTest {
void InvokeIdentityByteMethod(bool is_static) {
ScopedObjectAccess soa(env_);
- mirror::ArtMethod* method;
+ ArtMethod* method;
mirror::Object* receiver;
ReflectionTestMakeExecutable(&method, &receiver, is_static, "identity", "(B)B");
ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
@@ -164,7 +164,7 @@ class ReflectionTest : public CommonCompilerTest {
void InvokeIdentityIntMethod(bool is_static) {
ScopedObjectAccess soa(env_);
- mirror::ArtMethod* method;
+ ArtMethod* method;
mirror::Object* receiver;
ReflectionTestMakeExecutable(&method, &receiver, is_static, "identity", "(I)I");
ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
@@ -189,7 +189,7 @@ class ReflectionTest : public CommonCompilerTest {
void InvokeIdentityDoubleMethod(bool is_static) {
ScopedObjectAccess soa(env_);
- mirror::ArtMethod* method;
+ ArtMethod* method;
mirror::Object* receiver;
ReflectionTestMakeExecutable(&method, &receiver, is_static, "identity", "(D)D");
ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
@@ -214,7 +214,7 @@ class ReflectionTest : public CommonCompilerTest {
void InvokeSumIntIntMethod(bool is_static) {
ScopedObjectAccess soa(env_);
- mirror::ArtMethod* method;
+ ArtMethod* method;
mirror::Object* receiver;
ReflectionTestMakeExecutable(&method, &receiver, is_static, "sum", "(II)I");
ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
@@ -243,7 +243,7 @@ class ReflectionTest : public CommonCompilerTest {
void InvokeSumIntIntIntMethod(bool is_static) {
ScopedObjectAccess soa(env_);
- mirror::ArtMethod* method;
+ ArtMethod* method;
mirror::Object* receiver;
ReflectionTestMakeExecutable(&method, &receiver, is_static, "sum", "(III)I");
ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
@@ -282,7 +282,7 @@ class ReflectionTest : public CommonCompilerTest {
void InvokeSumIntIntIntIntMethod(bool is_static) {
ScopedObjectAccess soa(env_);
- mirror::ArtMethod* method;
+ ArtMethod* method;
mirror::Object* receiver;
ReflectionTestMakeExecutable(&method, &receiver, is_static, "sum", "(IIII)I");
ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
@@ -326,7 +326,7 @@ class ReflectionTest : public CommonCompilerTest {
void InvokeSumIntIntIntIntIntMethod(bool is_static) {
ScopedObjectAccess soa(env_);
- mirror::ArtMethod* method;
+ ArtMethod* method;
mirror::Object* receiver;
ReflectionTestMakeExecutable(&method, &receiver, is_static, "sum", "(IIIII)I");
ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
@@ -375,7 +375,7 @@ class ReflectionTest : public CommonCompilerTest {
void InvokeSumDoubleDoubleMethod(bool is_static) {
ScopedObjectAccess soa(env_);
- mirror::ArtMethod* method;
+ ArtMethod* method;
mirror::Object* receiver;
ReflectionTestMakeExecutable(&method, &receiver, is_static, "sum", "(DD)D");
ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
@@ -409,7 +409,7 @@ class ReflectionTest : public CommonCompilerTest {
void InvokeSumDoubleDoubleDoubleMethod(bool is_static) {
ScopedObjectAccess soa(env_);
- mirror::ArtMethod* method;
+ ArtMethod* method;
mirror::Object* receiver;
ReflectionTestMakeExecutable(&method, &receiver, is_static, "sum", "(DDD)D");
ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
@@ -436,7 +436,7 @@ class ReflectionTest : public CommonCompilerTest {
void InvokeSumDoubleDoubleDoubleDoubleMethod(bool is_static) {
ScopedObjectAccess soa(env_);
- mirror::ArtMethod* method;
+ ArtMethod* method;
mirror::Object* receiver;
ReflectionTestMakeExecutable(&method, &receiver, is_static, "sum", "(DDDD)D");
ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
@@ -466,7 +466,7 @@ class ReflectionTest : public CommonCompilerTest {
void InvokeSumDoubleDoubleDoubleDoubleDoubleMethod(bool is_static) {
ScopedObjectAccess soa(env_);
- mirror::ArtMethod* method;
+ ArtMethod* method;
mirror::Object* receiver;
ReflectionTestMakeExecutable(&method, &receiver, is_static, "sum", "(DDDDD)D");
ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
@@ -515,7 +515,7 @@ TEST_F(ReflectionTest, StaticMainMethod) {
mirror::Class* klass = class_linker_->FindClass(soa.Self(), "LMain;", class_loader);
ASSERT_TRUE(klass != nullptr);
- mirror::ArtMethod* method = klass->FindDirectMethod("main", "([Ljava/lang/String;)V");
+ ArtMethod* method = klass->FindDirectMethod("main", "([Ljava/lang/String;)V", sizeof(void*));
ASSERT_TRUE(method != nullptr);
// Start runtime.
diff --git a/runtime/runtime-inl.h b/runtime/runtime-inl.h
index a82bc85..68d5ad2 100644
--- a/runtime/runtime-inl.h
+++ b/runtime/runtime-inl.h
@@ -19,7 +19,7 @@
#include "runtime.h"
-#include "mirror/art_method.h"
+#include "art_method.h"
#include "read_barrier-inl.h"
namespace art {
@@ -34,52 +34,46 @@ inline mirror::Object* Runtime::GetClearedJniWeakGlobal() {
return obj;
}
-inline QuickMethodFrameInfo Runtime::GetRuntimeMethodFrameInfo(mirror::ArtMethod* method) {
+inline QuickMethodFrameInfo Runtime::GetRuntimeMethodFrameInfo(ArtMethod* method) {
DCHECK(method != nullptr);
// Cannot be imt-conflict-method or resolution-method.
- DCHECK(method != GetImtConflictMethod());
- DCHECK(method != GetResolutionMethod());
+ DCHECK_NE(method, GetImtConflictMethod());
+ DCHECK_NE(method, GetResolutionMethod());
// Don't use GetCalleeSaveMethod(), some tests don't set all callee save methods.
if (method == GetCalleeSaveMethodUnchecked(Runtime::kRefsAndArgs)) {
return GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
} else if (method == GetCalleeSaveMethodUnchecked(Runtime::kSaveAll)) {
return GetCalleeSaveMethodFrameInfo(Runtime::kSaveAll);
} else {
- DCHECK(method == GetCalleeSaveMethodUnchecked(Runtime::kRefsOnly));
+ DCHECK_EQ(method, GetCalleeSaveMethodUnchecked(Runtime::kRefsOnly));
return GetCalleeSaveMethodFrameInfo(Runtime::kRefsOnly);
}
}
-inline mirror::ArtMethod* Runtime::GetResolutionMethod() {
+inline ArtMethod* Runtime::GetResolutionMethod() {
CHECK(HasResolutionMethod());
- return resolution_method_.Read();
+ return resolution_method_;
}
-inline mirror::ArtMethod* Runtime::GetImtConflictMethod() {
+inline ArtMethod* Runtime::GetImtConflictMethod() {
CHECK(HasImtConflictMethod());
- return imt_conflict_method_.Read();
+ return imt_conflict_method_;
}
-inline mirror::ArtMethod* Runtime::GetImtUnimplementedMethod() {
- CHECK(!imt_unimplemented_method_.IsNull());
- return imt_unimplemented_method_.Read();
+inline ArtMethod* Runtime::GetImtUnimplementedMethod() {
+ CHECK(imt_unimplemented_method_ != nullptr);
+ return imt_unimplemented_method_;
}
-inline mirror::ObjectArray<mirror::ArtMethod>* Runtime::GetDefaultImt()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- CHECK(HasDefaultImt());
- return default_imt_.Read();
-}
-
-inline mirror::ArtMethod* Runtime::GetCalleeSaveMethod(CalleeSaveType type)
+inline ArtMethod* Runtime::GetCalleeSaveMethod(CalleeSaveType type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(HasCalleeSaveMethod(type));
- return callee_save_methods_[type].Read();
+ return GetCalleeSaveMethodUnchecked(type);
}
-inline mirror::ArtMethod* Runtime::GetCalleeSaveMethodUnchecked(CalleeSaveType type)
+inline ArtMethod* Runtime::GetCalleeSaveMethodUnchecked(CalleeSaveType type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return callee_save_methods_[type].Read();
+ return reinterpret_cast<ArtMethod*>(callee_save_methods_[type]);
}
} // namespace art
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 2618661..65ea77a 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -49,6 +49,7 @@
#include "arch/x86_64/quick_method_frame_info_x86_64.h"
#include "arch/x86_64/registers_x86_64.h"
#include "art_field-inl.h"
+#include "art_method-inl.h"
#include "asm_support.h"
#include "atomic.h"
#include "base/arena_allocator.h"
@@ -73,7 +74,6 @@
#include "jni_internal.h"
#include "linear_alloc.h"
#include "mirror/array.h"
-#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/field.h"
@@ -189,6 +189,7 @@ Runtime::Runtime()
is_native_bridge_loaded_(false),
zygote_max_failed_boots_(0) {
CheckAsmSupportOffsetsAndSizes();
+ std::fill(callee_save_methods_, callee_save_methods_ + arraysize(callee_save_methods_), 0u);
}
Runtime::~Runtime() {
@@ -425,20 +426,20 @@ static jobject CreateSystemClassLoader(Runtime* runtime) {
ScopedObjectAccess soa(Thread::Current());
ClassLinker* cl = Runtime::Current()->GetClassLinker();
+ auto pointer_size = cl->GetImagePointerSize();
StackHandleScope<2> hs(soa.Self());
Handle<mirror::Class> class_loader_class(
hs.NewHandle(soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ClassLoader)));
CHECK(cl->EnsureInitialized(soa.Self(), class_loader_class, true, true));
- mirror::ArtMethod* getSystemClassLoader =
- class_loader_class->FindDirectMethod("getSystemClassLoader", "()Ljava/lang/ClassLoader;");
+ ArtMethod* getSystemClassLoader = class_loader_class->FindDirectMethod(
+ "getSystemClassLoader", "()Ljava/lang/ClassLoader;", pointer_size);
CHECK(getSystemClassLoader != nullptr);
JValue result = InvokeWithJValues(soa, nullptr, soa.EncodeMethod(getSystemClassLoader), nullptr);
JNIEnv* env = soa.Self()->GetJniEnv();
- ScopedLocalRef<jobject> system_class_loader(env,
- soa.AddLocalReference<jobject>(result.GetL()));
+ ScopedLocalRef<jobject> system_class_loader(env, soa.AddLocalReference<jobject>(result.GetL()));
CHECK(system_class_loader.get() != nullptr);
soa.Self()->SetClassLoaderOverride(system_class_loader.get());
@@ -867,18 +868,17 @@ bool Runtime::Init(const RuntimeOptions& raw_options, bool ignore_unrecognized)
}
jit_options_.reset(jit::JitOptions::CreateFromRuntimeArguments(runtime_options));
- bool use_jit = jit_options_->UseJIT();
if (IsAotCompiler()) {
// If we are already the compiler at this point, we must be dex2oat. Don't create the jit in
// this case.
// If runtime_options doesn't have UseJIT set to true then CreateFromRuntimeArguments returns
// null and we don't create the jit.
- use_jit = false;
+ jit_options_->SetUseJIT(false);
}
// Use MemMap arena pool for jit, malloc otherwise. Malloc arenas are faster to allocate but
// can't be trimmed as easily.
- const bool use_malloc = !use_jit;
+ const bool use_malloc = IsAotCompiler();
arena_pool_.reset(new ArenaPool(use_malloc, false));
if (IsCompiler() && Is64BitInstructionSet(kRuntimeISA)) {
// 4gb, no malloc. Explanation in header.
@@ -1089,6 +1089,7 @@ bool Runtime::Init(const RuntimeOptions& raw_options, bool ignore_unrecognized)
}
VLOG(startup) << "Runtime::Init exiting";
+
return true;
}
@@ -1311,7 +1312,6 @@ mirror::Throwable* Runtime::GetPreAllocatedNoClassDefFoundError() {
void Runtime::VisitConstantRoots(RootVisitor* visitor) {
// Visit the classes held as static in mirror classes, these can be visited concurrently and only
// need to be visited once per GC since they never change.
- mirror::ArtMethod::VisitRoots(visitor);
mirror::Class::VisitRoots(visitor);
mirror::Constructor::VisitRoots(visitor);
mirror::Reference::VisitRoots(visitor);
@@ -1329,6 +1329,24 @@ void Runtime::VisitConstantRoots(RootVisitor* visitor) {
mirror::PrimitiveArray<int32_t>::VisitRoots(visitor); // IntArray
mirror::PrimitiveArray<int64_t>::VisitRoots(visitor); // LongArray
mirror::PrimitiveArray<int16_t>::VisitRoots(visitor); // ShortArray
+ // Visiting the roots of these ArtMethods is not currently required since all the GcRoots are
+ // null.
+ BufferedRootVisitor<16> buffered_visitor(visitor, RootInfo(kRootVMInternal));
+ if (HasResolutionMethod()) {
+ resolution_method_->VisitRoots(buffered_visitor);
+ }
+ if (HasImtConflictMethod()) {
+ imt_conflict_method_->VisitRoots(buffered_visitor);
+ }
+ if (imt_unimplemented_method_ != nullptr) {
+ imt_unimplemented_method_->VisitRoots(buffered_visitor);
+ }
+ for (size_t i = 0; i < kLastCalleeSaveType; ++i) {
+ auto* m = reinterpret_cast<ArtMethod*>(callee_save_methods_[i]);
+ if (m != nullptr) {
+ m->VisitRoots(buffered_visitor);
+ }
+ }
}
void Runtime::VisitConcurrentRoots(RootVisitor* visitor, VisitRootFlags flags) {
@@ -1350,17 +1368,9 @@ void Runtime::VisitNonThreadRoots(RootVisitor* visitor) {
java_vm_->VisitRoots(visitor);
sentinel_.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
pre_allocated_OutOfMemoryError_.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
- resolution_method_.VisitRoot(visitor, RootInfo(kRootVMInternal));
pre_allocated_NoClassDefFoundError_.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
- imt_conflict_method_.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
- imt_unimplemented_method_.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
- default_imt_.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
- for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
- callee_save_methods_[i].VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
- }
verifier::MethodVerifier::VisitStaticRoots(visitor);
VisitTransactionRoots(visitor);
- instrumentation_.VisitRoots(visitor);
}
void Runtime::VisitNonConcurrentRoots(RootVisitor* visitor) {
@@ -1399,73 +1409,43 @@ void Runtime::VisitImageRoots(RootVisitor* visitor) {
}
}
-mirror::ObjectArray<mirror::ArtMethod>* Runtime::CreateDefaultImt(ClassLinker* cl) {
- Thread* self = Thread::Current();
- StackHandleScope<1> hs(self);
- Handle<mirror::ObjectArray<mirror::ArtMethod>> imtable(
- hs.NewHandle(cl->AllocArtMethodArray(self, 64)));
- mirror::ArtMethod* imt_conflict_method = Runtime::Current()->GetImtConflictMethod();
- for (size_t i = 0; i < static_cast<size_t>(imtable->GetLength()); i++) {
- imtable->Set<false>(i, imt_conflict_method);
- }
- return imtable.Get();
-}
-
-mirror::ArtMethod* Runtime::CreateImtConflictMethod() {
- Thread* self = Thread::Current();
- Runtime* runtime = Runtime::Current();
- ClassLinker* class_linker = runtime->GetClassLinker();
- StackHandleScope<1> hs(self);
- Handle<mirror::ArtMethod> method(hs.NewHandle(class_linker->AllocArtMethod(self)));
- method->SetDeclaringClass(mirror::ArtMethod::GetJavaLangReflectArtMethod());
- // TODO: use a special method for imt conflict method saves.
- method->SetDexMethodIndex(DexFile::kDexNoIndex);
+ArtMethod* Runtime::CreateImtConflictMethod() {
+ auto* method = Runtime::Current()->GetClassLinker()->CreateRuntimeMethod();
// When compiling, the code pointer will get set later when the image is loaded.
- if (runtime->IsAotCompiler()) {
+ if (IsAotCompiler()) {
size_t pointer_size = GetInstructionSetPointerSize(instruction_set_);
method->SetEntryPointFromQuickCompiledCodePtrSize(nullptr, pointer_size);
} else {
method->SetEntryPointFromQuickCompiledCode(GetQuickImtConflictStub());
}
- return method.Get();
+ return method;
}
-void Runtime::SetImtConflictMethod(mirror::ArtMethod* method) {
- imt_conflict_method_ = GcRoot<mirror::ArtMethod>(method);
+void Runtime::SetImtConflictMethod(ArtMethod* method) {
+ CHECK(method != nullptr);
+ CHECK(method->IsRuntimeMethod());
+ imt_conflict_method_ = method;
}
-mirror::ArtMethod* Runtime::CreateResolutionMethod() {
- Thread* self = Thread::Current();
- Runtime* runtime = Runtime::Current();
- ClassLinker* class_linker = runtime->GetClassLinker();
- StackHandleScope<1> hs(self);
- Handle<mirror::ArtMethod> method(hs.NewHandle(class_linker->AllocArtMethod(self)));
- method->SetDeclaringClass(mirror::ArtMethod::GetJavaLangReflectArtMethod());
- // TODO: use a special method for resolution method saves
- method->SetDexMethodIndex(DexFile::kDexNoIndex);
+ArtMethod* Runtime::CreateResolutionMethod() {
+ auto* method = Runtime::Current()->GetClassLinker()->CreateRuntimeMethod();
// When compiling, the code pointer will get set later when the image is loaded.
- if (runtime->IsAotCompiler()) {
+ if (IsAotCompiler()) {
size_t pointer_size = GetInstructionSetPointerSize(instruction_set_);
method->SetEntryPointFromQuickCompiledCodePtrSize(nullptr, pointer_size);
} else {
method->SetEntryPointFromQuickCompiledCode(GetQuickResolutionStub());
}
- return method.Get();
+ return method;
}
-mirror::ArtMethod* Runtime::CreateCalleeSaveMethod() {
- Thread* self = Thread::Current();
- Runtime* runtime = Runtime::Current();
- ClassLinker* class_linker = runtime->GetClassLinker();
- StackHandleScope<1> hs(self);
- Handle<mirror::ArtMethod> method(hs.NewHandle(class_linker->AllocArtMethod(self)));
- method->SetDeclaringClass(mirror::ArtMethod::GetJavaLangReflectArtMethod());
- // TODO: use a special method for callee saves
- method->SetDexMethodIndex(DexFile::kDexNoIndex);
+ArtMethod* Runtime::CreateCalleeSaveMethod() {
+ auto* method = Runtime::Current()->GetClassLinker()->CreateRuntimeMethod();
size_t pointer_size = GetInstructionSetPointerSize(instruction_set_);
method->SetEntryPointFromQuickCompiledCodePtrSize(nullptr, pointer_size);
DCHECK_NE(instruction_set_, kNone);
- return method.Get();
+ DCHECK(method->IsRuntimeMethod());
+ return method;
}
void Runtime::DisallowNewSystemWeaks() {
@@ -1525,15 +1505,16 @@ void Runtime::SetInstructionSet(InstructionSet instruction_set) {
}
}
-void Runtime::SetCalleeSaveMethod(mirror::ArtMethod* method, CalleeSaveType type) {
+void Runtime::SetCalleeSaveMethod(ArtMethod* method, CalleeSaveType type) {
DCHECK_LT(static_cast<int>(type), static_cast<int>(kLastCalleeSaveType));
- callee_save_methods_[type] = GcRoot<mirror::ArtMethod>(method);
+ CHECK(method != nullptr);
+ callee_save_methods_[type] = reinterpret_cast<uintptr_t>(method);
}
void Runtime::StartProfiler(const char* profile_output_filename) {
profile_output_filename_ = profile_output_filename;
profiler_started_ =
- BackgroundMethodSamplingProfiler::Start(profile_output_filename_, profiler_options_);
+ BackgroundMethodSamplingProfiler::Start(profile_output_filename_, profiler_options_);
}
// Transaction support.
@@ -1550,7 +1531,6 @@ void Runtime::ExitTransactionMode() {
preinitialization_transaction_ = nullptr;
}
-
bool Runtime::IsTransactionAborted() const {
if (!IsActiveTransaction()) {
return false;
@@ -1709,4 +1689,16 @@ bool Runtime::IsCompilingBootImage() const {
return IsCompiler() && compiler_callbacks_->IsBootImage();
}
+void Runtime::SetResolutionMethod(ArtMethod* method) {
+ CHECK(method != nullptr);
+ CHECK(method->IsRuntimeMethod()) << method;
+ resolution_method_ = method;
+}
+
+void Runtime::SetImtUnimplementedMethod(ArtMethod* method) {
+ CHECK(method != nullptr);
+ CHECK(method->IsRuntimeMethod());
+ imt_unimplemented_method_ = method;
+}
+
} // namespace art
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 348d5c6..e569333 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -41,10 +41,6 @@
namespace art {
-class ArenaPool;
-class CompilerCallbacks;
-class LinearAlloc;
-
namespace gc {
class Heap;
namespace collector {
@@ -58,7 +54,6 @@ namespace jit {
} // namespace jit
namespace mirror {
- class ArtMethod;
class ClassLoader;
class Array;
template<class T> class ObjectArray;
@@ -70,11 +65,15 @@ namespace mirror {
namespace verifier {
class MethodVerifier;
} // namespace verifier
+class ArenaPool;
+class ArtMethod;
class ClassLinker;
class Closure;
+class CompilerCallbacks;
class DexFile;
class InternTable;
class JavaVMExt;
+class LinearAlloc;
class MonitorList;
class MonitorPool;
class NullPointerHandler;
@@ -99,6 +98,9 @@ enum VisitRootFlags : uint8_t {
kVisitRootFlagStartLoggingNewRoots = 0x4,
kVisitRootFlagStopLoggingNewRoots = 0x8,
kVisitRootFlagClearRootLog = 0x10,
+ // Non moving means we can have optimizations where we don't visit some roots if they are
+ // definitely reachable from another location. E.g. ArtMethod and ArtField roots.
+ kVisitRootFlagNonMoving = 0x20,
};
class Runtime {
@@ -342,47 +344,28 @@ class Runtime {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Returns a special method that calls into a trampoline for runtime method resolution
- mirror::ArtMethod* GetResolutionMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtMethod* GetResolutionMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool HasResolutionMethod() const {
- return !resolution_method_.IsNull();
+ return resolution_method_ != nullptr;
}
- void SetResolutionMethod(mirror::ArtMethod* method) {
- resolution_method_ = GcRoot<mirror::ArtMethod>(method);
- }
+ void SetResolutionMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::ArtMethod* CreateResolutionMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtMethod* CreateResolutionMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Returns a special method that calls into a trampoline for runtime imt conflicts.
- mirror::ArtMethod* GetImtConflictMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::ArtMethod* GetImtUnimplementedMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtMethod* GetImtConflictMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtMethod* GetImtUnimplementedMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool HasImtConflictMethod() const {
- return !imt_conflict_method_.IsNull();
+ return imt_conflict_method_ != nullptr;
}
- void SetImtConflictMethod(mirror::ArtMethod* method);
- void SetImtUnimplementedMethod(mirror::ArtMethod* method) {
- imt_unimplemented_method_ = GcRoot<mirror::ArtMethod>(method);
- }
+ void SetImtConflictMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetImtUnimplementedMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::ArtMethod* CreateImtConflictMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- // Returns an imt with every entry set to conflict, used as default imt for all classes.
- mirror::ObjectArray<mirror::ArtMethod>* GetDefaultImt()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- bool HasDefaultImt() const {
- return !default_imt_.IsNull();
- }
-
- void SetDefaultImt(mirror::ObjectArray<mirror::ArtMethod>* imt) {
- default_imt_ = GcRoot<mirror::ObjectArray<mirror::ArtMethod>>(imt);
- }
-
- mirror::ObjectArray<mirror::ArtMethod>* CreateDefaultImt(ClassLinker* cl)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtMethod* CreateImtConflictMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Returns a special method that describes all callee saves being spilled to the stack.
enum CalleeSaveType {
@@ -393,20 +376,20 @@ class Runtime {
};
bool HasCalleeSaveMethod(CalleeSaveType type) const {
- return !callee_save_methods_[type].IsNull();
+ return callee_save_methods_[type] != 0u;
}
- mirror::ArtMethod* GetCalleeSaveMethod(CalleeSaveType type)
+ ArtMethod* GetCalleeSaveMethod(CalleeSaveType type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::ArtMethod* GetCalleeSaveMethodUnchecked(CalleeSaveType type)
+ ArtMethod* GetCalleeSaveMethodUnchecked(CalleeSaveType type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
QuickMethodFrameInfo GetCalleeSaveMethodFrameInfo(CalleeSaveType type) const {
return callee_save_method_frame_infos_[type];
}
- QuickMethodFrameInfo GetRuntimeMethodFrameInfo(mirror::ArtMethod* method)
+ QuickMethodFrameInfo GetRuntimeMethodFrameInfo(ArtMethod* method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static size_t GetCalleeSaveMethodOffset(CalleeSaveType type) {
@@ -419,9 +402,9 @@ class Runtime {
void SetInstructionSet(InstructionSet instruction_set);
- void SetCalleeSaveMethod(mirror::ArtMethod* method, CalleeSaveType type);
+ void SetCalleeSaveMethod(ArtMethod* method, CalleeSaveType type);
- mirror::ArtMethod* CreateCalleeSaveMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtMethod* CreateCalleeSaveMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
int32_t GetStat(int kind);
@@ -588,15 +571,15 @@ class Runtime {
static constexpr int kProfileForground = 0;
static constexpr int kProfileBackgrouud = 1;
- GcRoot<mirror::ArtMethod> callee_save_methods_[kLastCalleeSaveType];
+ // 64 bit so that we can share the same asm offsets for both 32 and 64 bits.
+ uint64_t callee_save_methods_[kLastCalleeSaveType];
GcRoot<mirror::Throwable> pre_allocated_OutOfMemoryError_;
GcRoot<mirror::Throwable> pre_allocated_NoClassDefFoundError_;
- GcRoot<mirror::ArtMethod> resolution_method_;
- GcRoot<mirror::ArtMethod> imt_conflict_method_;
+ ArtMethod* resolution_method_;
+ ArtMethod* imt_conflict_method_;
// Unresolved method has the same behavior as the conflict method, it is used by the class linker
// for differentiating between unfilled imt slots vs conflict slots in superclasses.
- GcRoot<mirror::ArtMethod> imt_unimplemented_method_;
- GcRoot<mirror::ObjectArray<mirror::ArtMethod>> default_imt_;
+ ArtMethod* imt_unimplemented_method_;
// Special sentinel object used to invalid conditions in JNI (cleared weak references) and
// JDWP (invalid references).
diff --git a/runtime/scoped_thread_state_change.h b/runtime/scoped_thread_state_change.h
index 60ed55a..1cc2df6 100644
--- a/runtime/scoped_thread_state_change.h
+++ b/runtime/scoped_thread_state_change.h
@@ -158,20 +158,15 @@ class ScopedObjectAccessAlreadyRunnable {
return reinterpret_cast<jfieldID>(field);
}
- mirror::ArtMethod* DecodeMethod(jmethodID mid) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ArtMethod* DecodeMethod(jmethodID mid) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Locks::mutator_lock_->AssertSharedHeld(Self());
DCHECK(IsRunnable()); // Don't work with raw objects in non-runnable states.
- CHECK(!kMovingMethods);
- mirror::ArtMethod* method = reinterpret_cast<mirror::ArtMethod*>(mid);
- return ReadBarrier::BarrierForRoot<mirror::ArtMethod, kWithReadBarrier>(&method);
+ return reinterpret_cast<ArtMethod*>(mid);
}
- jmethodID EncodeMethod(mirror::ArtMethod* method) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ jmethodID EncodeMethod(ArtMethod* method) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Locks::mutator_lock_->AssertSharedHeld(Self());
DCHECK(IsRunnable()); // Don't work with raw objects in non-runnable states.
- CHECK(!kMovingMethods);
return reinterpret_cast<jmethodID>(method);
}
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 800acaa..6cca4d2 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -17,12 +17,14 @@
#include "stack.h"
#include "arch/context.h"
+#include "art_method-inl.h"
#include "base/hex_dump.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc_map.h"
-#include "mirror/art_method-inl.h"
+#include "gc/space/image_space.h"
+#include "gc/space/space-inl.h"
+#include "linear_alloc.h"
#include "mirror/class-inl.h"
-#include "mirror/object.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "quick/quick_method_frame_info.h"
@@ -34,8 +36,10 @@
namespace art {
+static constexpr bool kDebugStackWalk = false;
+
mirror::Object* ShadowFrame::GetThisObject() const {
- mirror::ArtMethod* m = GetMethod();
+ ArtMethod* m = GetMethod();
if (m->IsStatic()) {
return nullptr;
} else if (m->IsNative()) {
@@ -49,7 +53,7 @@ mirror::Object* ShadowFrame::GetThisObject() const {
}
mirror::Object* ShadowFrame::GetThisObject(uint16_t num_ins) const {
- mirror::ArtMethod* m = GetMethod();
+ ArtMethod* m = GetMethod();
if (m->IsStatic()) {
return nullptr;
} else {
@@ -113,11 +117,12 @@ uint32_t StackVisitor::GetDexPc(bool abort_on_failure) const {
}
}
-extern "C" mirror::Object* artQuickGetProxyThisObject(StackReference<mirror::ArtMethod>* sp)
+extern "C" mirror::Object* artQuickGetProxyThisObject(ArtMethod** sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
mirror::Object* StackVisitor::GetThisObject() const {
- mirror::ArtMethod* m = GetMethod();
+ DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), sizeof(void*));
+ ArtMethod* m = GetMethod();
if (m->IsStatic()) {
return nullptr;
} else if (m->IsNative()) {
@@ -156,7 +161,7 @@ size_t StackVisitor::GetNativePcOffset() const {
return GetMethod()->NativeQuickPcOffset(cur_quick_frame_pc_);
}
-bool StackVisitor::IsReferenceVReg(mirror::ArtMethod* m, uint16_t vreg) {
+bool StackVisitor::IsReferenceVReg(ArtMethod* m, uint16_t vreg) {
// Process register map (which native and runtime methods don't have)
if (m->IsNative() || m->IsRuntimeMethod() || m->IsProxyMethod()) {
return false;
@@ -183,8 +188,7 @@ bool StackVisitor::IsReferenceVReg(mirror::ArtMethod* m, uint16_t vreg) {
return vreg < num_regs && TestBitmap(vreg, reg_bitmap);
}
-bool StackVisitor::GetVReg(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind,
- uint32_t* val) const {
+bool StackVisitor::GetVReg(ArtMethod* m, uint16_t vreg, VRegKind kind, uint32_t* val) const {
if (cur_quick_frame_ != nullptr) {
DCHECK(context_ != nullptr); // You can't reliably read registers without a context.
DCHECK(m == GetMethod());
@@ -200,7 +204,7 @@ bool StackVisitor::GetVReg(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind,
}
}
-bool StackVisitor::GetVRegFromQuickCode(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind,
+bool StackVisitor::GetVRegFromQuickCode(ArtMethod* m, uint16_t vreg, VRegKind kind,
uint32_t* val) const {
const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
DCHECK(code_pointer != nullptr);
@@ -223,7 +227,7 @@ bool StackVisitor::GetVRegFromQuickCode(mirror::ArtMethod* m, uint16_t vreg, VRe
}
}
-bool StackVisitor::GetVRegFromOptimizedCode(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind,
+bool StackVisitor::GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKind kind,
uint32_t* val) const {
const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
DCHECK(code_pointer != nullptr);
@@ -287,7 +291,7 @@ bool StackVisitor::GetRegisterIfAccessible(uint32_t reg, VRegKind kind, uint32_t
return true;
}
-bool StackVisitor::GetVRegPair(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
+bool StackVisitor::GetVRegPair(ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
VRegKind kind_hi, uint64_t* val) const {
if (kind_lo == kLongLoVReg) {
DCHECK_EQ(kind_hi, kLongHiVReg);
@@ -312,7 +316,7 @@ bool StackVisitor::GetVRegPair(mirror::ArtMethod* m, uint16_t vreg, VRegKind kin
}
}
-bool StackVisitor::GetVRegPairFromQuickCode(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
+bool StackVisitor::GetVRegPairFromQuickCode(ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
VRegKind kind_hi, uint64_t* val) const {
const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
DCHECK(code_pointer != nullptr);
@@ -339,7 +343,7 @@ bool StackVisitor::GetVRegPairFromQuickCode(mirror::ArtMethod* m, uint16_t vreg,
}
}
-bool StackVisitor::GetVRegPairFromOptimizedCode(mirror::ArtMethod* m, uint16_t vreg,
+bool StackVisitor::GetVRegPairFromOptimizedCode(ArtMethod* m, uint16_t vreg,
VRegKind kind_lo, VRegKind kind_hi,
uint64_t* val) const {
uint32_t low_32bits;
@@ -371,7 +375,7 @@ bool StackVisitor::GetRegisterPairIfAccessible(uint32_t reg_lo, uint32_t reg_hi,
return true;
}
-bool StackVisitor::SetVReg(mirror::ArtMethod* m, uint16_t vreg, uint32_t new_value,
+bool StackVisitor::SetVReg(ArtMethod* m, uint16_t vreg, uint32_t new_value,
VRegKind kind) {
if (cur_quick_frame_ != nullptr) {
DCHECK(context_ != nullptr); // You can't reliably write registers without a context.
@@ -387,7 +391,7 @@ bool StackVisitor::SetVReg(mirror::ArtMethod* m, uint16_t vreg, uint32_t new_val
}
}
-bool StackVisitor::SetVRegFromQuickCode(mirror::ArtMethod* m, uint16_t vreg, uint32_t new_value,
+bool StackVisitor::SetVRegFromQuickCode(ArtMethod* m, uint16_t vreg, uint32_t new_value,
VRegKind kind) {
DCHECK(context_ != nullptr); // You can't reliably write registers without a context.
DCHECK(m == GetMethod());
@@ -445,7 +449,7 @@ bool StackVisitor::SetRegisterIfAccessible(uint32_t reg, uint32_t new_value, VRe
return true;
}
-bool StackVisitor::SetVRegPair(mirror::ArtMethod* m, uint16_t vreg, uint64_t new_value,
+bool StackVisitor::SetVRegPair(ArtMethod* m, uint16_t vreg, uint64_t new_value,
VRegKind kind_lo, VRegKind kind_hi) {
if (kind_lo == kLongLoVReg) {
DCHECK_EQ(kind_hi, kLongHiVReg);
@@ -470,7 +474,7 @@ bool StackVisitor::SetVRegPair(mirror::ArtMethod* m, uint16_t vreg, uint64_t new
}
bool StackVisitor::SetVRegPairFromQuickCode(
- mirror::ArtMethod* m, uint16_t vreg, uint64_t new_value, VRegKind kind_lo, VRegKind kind_hi) {
+ ArtMethod* m, uint16_t vreg, uint64_t new_value, VRegKind kind_lo, VRegKind kind_hi) {
const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
DCHECK(code_pointer != nullptr);
const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*)));
@@ -586,7 +590,7 @@ size_t StackVisitor::ComputeNumFrames(Thread* thread, StackWalkKind walk_kind) {
return visitor.frames;
}
-bool StackVisitor::GetNextMethodAndDexPc(mirror::ArtMethod** next_method, uint32_t* next_dex_pc) {
+bool StackVisitor::GetNextMethodAndDexPc(ArtMethod** next_method, uint32_t* next_dex_pc) {
struct HasMoreFramesVisitor : public StackVisitor {
HasMoreFramesVisitor(Thread* thread,
StackWalkKind walk_kind,
@@ -602,7 +606,7 @@ bool StackVisitor::GetNextMethodAndDexPc(mirror::ArtMethod** next_method, uint32
bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (found_frame_) {
- mirror::ArtMethod* method = GetMethod();
+ ArtMethod* method = GetMethod();
if (method != nullptr && !method->IsRuntimeMethod()) {
has_more_frames_ = true;
next_method_ = method;
@@ -618,7 +622,7 @@ bool StackVisitor::GetNextMethodAndDexPc(mirror::ArtMethod** next_method, uint32
size_t frame_height_;
bool found_frame_;
bool has_more_frames_;
- mirror::ArtMethod* next_method_;
+ ArtMethod* next_method_;
uint32_t next_dex_pc_;
};
HasMoreFramesVisitor visitor(thread_, walk_kind_, GetNumFrames(), GetFrameHeight());
@@ -644,7 +648,7 @@ void StackVisitor::DescribeStack(Thread* thread) {
std::string StackVisitor::DescribeLocation() const {
std::string result("Visiting method '");
- mirror::ArtMethod* m = GetMethod();
+ ArtMethod* m = GetMethod();
if (m == nullptr) {
return "upcall";
}
@@ -664,8 +668,34 @@ static instrumentation::InstrumentationStackFrame& GetInstrumentationStackFrame(
void StackVisitor::SanityCheckFrame() const {
if (kIsDebugBuild) {
- mirror::ArtMethod* method = GetMethod();
- CHECK_EQ(method->GetClass(), mirror::ArtMethod::GetJavaLangReflectArtMethod());
+ ArtMethod* method = GetMethod();
+ auto* declaring_class = method->GetDeclaringClass();
+ // Runtime methods have null declaring class.
+ if (!method->IsRuntimeMethod()) {
+ CHECK(declaring_class != nullptr);
+ CHECK_EQ(declaring_class->GetClass(), declaring_class->GetClass()->GetClass())
+ << declaring_class;
+ } else {
+ CHECK(declaring_class == nullptr);
+ }
+ auto* runtime = Runtime::Current();
+ auto* la = runtime->GetLinearAlloc();
+ if (!la->Contains(method)) {
+ // Check image space.
+ bool in_image = false;
+ for (auto& space : runtime->GetHeap()->GetContinuousSpaces()) {
+ if (space->IsImageSpace()) {
+ auto* image_space = space->AsImageSpace();
+ const auto& header = image_space->GetImageHeader();
+ const auto* methods = &header.GetMethodsSection();
+ if (methods->Contains(reinterpret_cast<const uint8_t*>(method) - image_space->Begin())) {
+ in_image = true;
+ break;
+ }
+ }
+ }
+ CHECK(in_image) << PrettyMethod(method) << " not in linear alloc or image";
+ }
if (cur_quick_frame_ != nullptr) {
method->AssertPcIsWithinQuickCode(cur_quick_frame_pc_);
// Frame sanity.
@@ -701,7 +731,7 @@ void StackVisitor::WalkStack(bool include_transitions) {
if (cur_quick_frame_ != nullptr) { // Handle quick stack frames.
// Can't be both a shadow and a quick fragment.
DCHECK(current_fragment->GetTopShadowFrame() == nullptr);
- mirror::ArtMethod* method = cur_quick_frame_->AsMirrorPtr();
+ ArtMethod* method = *cur_quick_frame_;
while (method != nullptr) {
SanityCheckFrame();
bool should_continue = VisitFrame();
@@ -727,8 +757,7 @@ void StackVisitor::WalkStack(bool include_transitions) {
if (GetMethod() == Runtime::Current()->GetCalleeSaveMethod(Runtime::kSaveAll)) {
// Skip runtime save all callee frames which are used to deliver exceptions.
} else if (instrumentation_frame.interpreter_entry_) {
- mirror::ArtMethod* callee =
- Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs);
+ ArtMethod* callee = Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs);
CHECK_EQ(GetMethod(), callee) << "Expected: " << PrettyMethod(callee) << " Found: "
<< PrettyMethod(GetMethod());
} else if (instrumentation_frame.method_ != GetMethod()) {
@@ -747,9 +776,20 @@ void StackVisitor::WalkStack(bool include_transitions) {
}
cur_quick_frame_pc_ = return_pc;
uint8_t* next_frame = reinterpret_cast<uint8_t*>(cur_quick_frame_) + frame_size;
- cur_quick_frame_ = reinterpret_cast<StackReference<mirror::ArtMethod>*>(next_frame);
+ cur_quick_frame_ = reinterpret_cast<ArtMethod**>(next_frame);
+
+ if (kDebugStackWalk) {
+ LOG(INFO) << PrettyMethod(method) << "@" << method << " size=" << frame_size
+ << " optimized=" << method->IsOptimized(sizeof(void*))
+ << " native=" << method->IsNative()
+ << " entrypoints=" << method->GetEntryPointFromQuickCompiledCode()
+ << "," << method->GetEntryPointFromJni()
+ << "," << method->GetEntryPointFromInterpreter()
+ << " next=" << *cur_quick_frame_;
+ }
+
cur_depth_++;
- method = cur_quick_frame_->AsMirrorPtr();
+ method = *cur_quick_frame_;
}
} else if (cur_shadow_frame_ != nullptr) {
do {
@@ -782,4 +822,42 @@ void JavaFrameRootInfo::Describe(std::ostream& os) const {
visitor->DescribeLocation() << " vreg=" << vreg_;
}
+int StackVisitor::GetVRegOffsetFromQuickCode(const DexFile::CodeItem* code_item,
+ uint32_t core_spills, uint32_t fp_spills,
+ size_t frame_size, int reg, InstructionSet isa) {
+ size_t pointer_size = InstructionSetPointerSize(isa);
+ if (kIsDebugBuild) {
+ auto* runtime = Runtime::Current();
+ if (runtime != nullptr) {
+ CHECK_EQ(runtime->GetClassLinker()->GetImagePointerSize(), pointer_size);
+ }
+ }
+ DCHECK_EQ(frame_size & (kStackAlignment - 1), 0U);
+ DCHECK_NE(reg, -1);
+ int spill_size = POPCOUNT(core_spills) * GetBytesPerGprSpillLocation(isa)
+ + POPCOUNT(fp_spills) * GetBytesPerFprSpillLocation(isa)
+ + sizeof(uint32_t); // Filler.
+ int num_regs = code_item->registers_size_ - code_item->ins_size_;
+ int temp_threshold = code_item->registers_size_;
+ const int max_num_special_temps = 1;
+ if (reg == temp_threshold) {
+ // The current method pointer corresponds to special location on stack.
+ return 0;
+ } else if (reg >= temp_threshold + max_num_special_temps) {
+ /*
+ * Special temporaries may have custom locations and the logic above deals with that.
+ * However, non-special temporaries are placed relative to the outs.
+ */
+ int temps_start = code_item->outs_size_ * sizeof(uint32_t) + pointer_size /* art method */;
+ int relative_offset = (reg - (temp_threshold + max_num_special_temps)) * sizeof(uint32_t);
+ return temps_start + relative_offset;
+ } else if (reg < num_regs) {
+ int locals_start = frame_size - spill_size - num_regs * sizeof(uint32_t);
+ return locals_start + (reg * sizeof(uint32_t));
+ } else {
+ // Handle ins.
+ return frame_size + ((reg - num_regs) * sizeof(uint32_t)) + pointer_size /* art method */;
+ }
+}
+
} // namespace art
diff --git a/runtime/stack.h b/runtime/stack.h
index 4d36573..38dfe1b 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -31,10 +31,10 @@
namespace art {
namespace mirror {
- class ArtMethod;
class Object;
} // namespace mirror
+class ArtMethod;
class Context;
class ShadowFrame;
class HandleScope;
@@ -75,7 +75,7 @@ class ShadowFrame {
// Create ShadowFrame in heap for deoptimization.
static ShadowFrame* CreateDeoptimizedFrame(uint32_t num_vregs, ShadowFrame* link,
- mirror::ArtMethod* method, uint32_t dex_pc) {
+ ArtMethod* method, uint32_t dex_pc) {
uint8_t* memory = new uint8_t[ComputeSize(num_vregs)];
return Create(num_vregs, link, method, dex_pc, memory);
}
@@ -88,7 +88,7 @@ class ShadowFrame {
// Create ShadowFrame for interpreter using provided memory.
static ShadowFrame* Create(uint32_t num_vregs, ShadowFrame* link,
- mirror::ArtMethod* method, uint32_t dex_pc, void* memory) {
+ ArtMethod* method, uint32_t dex_pc, void* memory) {
ShadowFrame* sf = new (memory) ShadowFrame(num_vregs, link, method, dex_pc, true);
return sf;
}
@@ -238,16 +238,11 @@ class ShadowFrame {
}
}
- mirror::ArtMethod* GetMethod() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ArtMethod* GetMethod() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(method_ != nullptr);
return method_;
}
- mirror::ArtMethod** GetMethodAddress() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(method_ != nullptr);
- return &method_;
- }
-
mirror::Object* GetThisObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
mirror::Object* GetThisObject(uint16_t num_ins) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -284,7 +279,7 @@ class ShadowFrame {
}
private:
- ShadowFrame(uint32_t num_vregs, ShadowFrame* link, mirror::ArtMethod* method,
+ ShadowFrame(uint32_t num_vregs, ShadowFrame* link, ArtMethod* method,
uint32_t dex_pc, bool has_reference_array)
: number_of_vregs_(num_vregs), link_(link), method_(method), dex_pc_(dex_pc) {
if (has_reference_array) {
@@ -308,7 +303,7 @@ class ShadowFrame {
const uint32_t number_of_vregs_;
// Link to previous shadow frame or null.
ShadowFrame* link_;
- mirror::ArtMethod* method_;
+ ArtMethod* method_;
uint32_t dex_pc_;
uint32_t vregs_[0];
@@ -356,11 +351,11 @@ class PACKED(4) ManagedStack {
return link_;
}
- StackReference<mirror::ArtMethod>* GetTopQuickFrame() const {
+ ArtMethod** GetTopQuickFrame() const {
return top_quick_frame_;
}
- void SetTopQuickFrame(StackReference<mirror::ArtMethod>* top) {
+ void SetTopQuickFrame(ArtMethod** top) {
DCHECK(top_shadow_frame_ == nullptr);
top_quick_frame_ = top;
}
@@ -403,7 +398,7 @@ class PACKED(4) ManagedStack {
bool ShadowFramesContain(StackReference<mirror::Object>* shadow_frame_entry) const;
private:
- StackReference<mirror::ArtMethod>* top_quick_frame_;
+ ArtMethod** top_quick_frame_;
ManagedStack* link_;
ShadowFrame* top_shadow_frame_;
};
@@ -430,11 +425,11 @@ class StackVisitor {
void WalkStack(bool include_transitions = false)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::ArtMethod* GetMethod() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ArtMethod* GetMethod() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (cur_shadow_frame_ != nullptr) {
return cur_shadow_frame_->GetMethod();
} else if (cur_quick_frame_ != nullptr) {
- return cur_quick_frame_->AsMirrorPtr();
+ return *cur_quick_frame_;
} else {
return nullptr;
}
@@ -484,30 +479,30 @@ class StackVisitor {
}
// Get the method and dex pc immediately after the one that's currently being visited.
- bool GetNextMethodAndDexPc(mirror::ArtMethod** next_method, uint32_t* next_dex_pc)
+ bool GetNextMethodAndDexPc(ArtMethod** next_method, uint32_t* next_dex_pc)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool IsReferenceVReg(mirror::ArtMethod* m, uint16_t vreg)
+ bool IsReferenceVReg(ArtMethod* m, uint16_t vreg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool GetVReg(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind, uint32_t* val) const
+ bool GetVReg(ArtMethod* m, uint16_t vreg, VRegKind kind, uint32_t* val) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool GetVRegPair(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind_lo, VRegKind kind_hi,
+ bool GetVRegPair(ArtMethod* m, uint16_t vreg, VRegKind kind_lo, VRegKind kind_hi,
uint64_t* val) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool SetVReg(mirror::ArtMethod* m, uint16_t vreg, uint32_t new_value, VRegKind kind)
+ bool SetVReg(ArtMethod* m, uint16_t vreg, uint32_t new_value, VRegKind kind)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool SetVRegPair(mirror::ArtMethod* m, uint16_t vreg, uint64_t new_value,
+ bool SetVRegPair(ArtMethod* m, uint16_t vreg, uint64_t new_value,
VRegKind kind_lo, VRegKind kind_hi)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
uintptr_t* GetGPRAddress(uint32_t reg) const;
// This is a fast-path for getting/setting values in a quick frame.
- uint32_t* GetVRegAddrFromQuickCode(StackReference<mirror::ArtMethod>* cur_quick_frame,
+ uint32_t* GetVRegAddrFromQuickCode(ArtMethod** cur_quick_frame,
const DexFile::CodeItem* code_item,
uint32_t core_spills, uint32_t fp_spills, size_t frame_size,
uint16_t vreg) const {
@@ -541,7 +536,7 @@ class StackVisitor {
* | IN[ins-1] | {Note: resides in caller's frame}
* | . |
* | IN[0] |
- * | caller's ArtMethod | ... StackReference<ArtMethod>
+ * | caller's ArtMethod | ... ArtMethod*
* +===============================+ {Note: start of callee's frame}
* | core callee-save spill | {variable sized}
* +-------------------------------+
@@ -568,46 +563,16 @@ class StackVisitor {
* | OUT[outs-2] |
* | . |
* | OUT[0] |
- * | StackReference<ArtMethod> | ... (reg == num_total_code_regs == special_temp_value) <<== sp, 16-byte aligned
+ * | ArtMethod* | ... (reg == num_total_code_regs == special_temp_value) <<== sp, 16-byte aligned
* +===============================+
*/
static int GetVRegOffsetFromQuickCode(const DexFile::CodeItem* code_item,
uint32_t core_spills, uint32_t fp_spills,
- size_t frame_size, int reg, InstructionSet isa) {
- DCHECK_EQ(frame_size & (kStackAlignment - 1), 0U);
- DCHECK_NE(reg, -1);
- int spill_size = POPCOUNT(core_spills) * GetBytesPerGprSpillLocation(isa)
- + POPCOUNT(fp_spills) * GetBytesPerFprSpillLocation(isa)
- + sizeof(uint32_t); // Filler.
- int num_regs = code_item->registers_size_ - code_item->ins_size_;
- int temp_threshold = code_item->registers_size_;
- const int max_num_special_temps = 1;
- if (reg == temp_threshold) {
- // The current method pointer corresponds to special location on stack.
- return 0;
- } else if (reg >= temp_threshold + max_num_special_temps) {
- /*
- * Special temporaries may have custom locations and the logic above deals with that.
- * However, non-special temporaries are placed relative to the outs.
- */
- int temps_start = sizeof(StackReference<mirror::ArtMethod>) +
- code_item->outs_size_ * sizeof(uint32_t);
- int relative_offset = (reg - (temp_threshold + max_num_special_temps)) * sizeof(uint32_t);
- return temps_start + relative_offset;
- } else if (reg < num_regs) {
- int locals_start = frame_size - spill_size - num_regs * sizeof(uint32_t);
- return locals_start + (reg * sizeof(uint32_t));
- } else {
- // Handle ins.
- return frame_size + ((reg - num_regs) * sizeof(uint32_t)) +
- sizeof(StackReference<mirror::ArtMethod>);
- }
- }
+ size_t frame_size, int reg, InstructionSet isa);
static int GetOutVROffset(uint16_t out_num, InstructionSet isa) {
- UNUSED(isa);
// According to stack model, the first out is above the Method referernce.
- return sizeof(StackReference<mirror::ArtMethod>) + (out_num * sizeof(uint32_t));
+ return InstructionSetPointerSize(isa) + out_num * sizeof(uint32_t);
}
bool IsInInlinedFrame() const {
@@ -618,7 +583,7 @@ class StackVisitor {
return cur_quick_frame_pc_;
}
- StackReference<mirror::ArtMethod>* GetCurrentQuickFrame() const {
+ ArtMethod** GetCurrentQuickFrame() const {
return cur_quick_frame_;
}
@@ -626,10 +591,10 @@ class StackVisitor {
return cur_shadow_frame_;
}
- HandleScope* GetCurrentHandleScope() const {
- StackReference<mirror::ArtMethod>* sp = GetCurrentQuickFrame();
- ++sp; // Skip Method*; handle scope comes next;
- return reinterpret_cast<HandleScope*>(sp);
+ HandleScope* GetCurrentHandleScope(size_t pointer_size) const {
+ ArtMethod** sp = GetCurrentQuickFrame();
+ // Skip ArtMethod*; handle scope comes next;
+ return reinterpret_cast<HandleScope*>(reinterpret_cast<uintptr_t>(sp) + pointer_size);
}
std::string DescribeLocation() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -668,19 +633,19 @@ class StackVisitor {
uintptr_t GetFPR(uint32_t reg) const;
void SetFPR(uint32_t reg, uintptr_t value);
- bool GetVRegFromQuickCode(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind,
+ bool GetVRegFromQuickCode(ArtMethod* m, uint16_t vreg, VRegKind kind,
uint32_t* val) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool GetVRegFromOptimizedCode(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind,
+ bool GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKind kind,
uint32_t* val) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool GetRegisterIfAccessible(uint32_t reg, VRegKind kind, uint32_t* val) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool GetVRegPairFromQuickCode(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
+ bool GetVRegPairFromQuickCode(ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
VRegKind kind_hi, uint64_t* val) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool GetVRegPairFromOptimizedCode(mirror::ArtMethod* m, uint16_t vreg,
+ bool GetVRegPairFromOptimizedCode(ArtMethod* m, uint16_t vreg,
VRegKind kind_lo, VRegKind kind_hi,
uint64_t* val) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -688,13 +653,13 @@ class StackVisitor {
uint64_t* val) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool SetVRegFromQuickCode(mirror::ArtMethod* m, uint16_t vreg, uint32_t new_value,
+ bool SetVRegFromQuickCode(ArtMethod* m, uint16_t vreg, uint32_t new_value,
VRegKind kind)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool SetRegisterIfAccessible(uint32_t reg, uint32_t new_value, VRegKind kind)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool SetVRegPairFromQuickCode(mirror::ArtMethod* m, uint16_t vreg, uint64_t new_value,
+ bool SetVRegPairFromQuickCode(ArtMethod* m, uint16_t vreg, uint64_t new_value,
VRegKind kind_lo, VRegKind kind_hi)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool SetRegisterPairIfAccessible(uint32_t reg_lo, uint32_t reg_hi, uint64_t new_value,
@@ -706,7 +671,7 @@ class StackVisitor {
Thread* const thread_;
const StackWalkKind walk_kind_;
ShadowFrame* cur_shadow_frame_;
- StackReference<mirror::ArtMethod>* cur_quick_frame_;
+ ArtMethod** cur_quick_frame_;
uintptr_t cur_quick_frame_pc_;
// Lazily computed, number of frames in the stack.
size_t num_frames_;
diff --git a/runtime/stride_iterator.h b/runtime/stride_iterator.h
new file mode 100644
index 0000000..5971524
--- /dev/null
+++ b/runtime/stride_iterator.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_STRIDE_ITERATOR_H_
+#define ART_RUNTIME_STRIDE_ITERATOR_H_
+
+#include <iterator>
+
+namespace art {
+
+template<typename T>
+class StrideIterator : public std::iterator<std::random_access_iterator_tag, T> {
+ public:
+ StrideIterator(const StrideIterator&) = default;
+ StrideIterator(StrideIterator&&) = default;
+ StrideIterator& operator=(const StrideIterator&) = default;
+ StrideIterator& operator=(StrideIterator&&) = default;
+
+ StrideIterator(uintptr_t ptr, size_t stride)
+ : ptr_(ptr), stride_(stride) {
+ }
+
+ bool operator==(const StrideIterator& other) const {
+ return ptr_ == other.ptr_;
+ }
+
+ bool operator!=(const StrideIterator& other) const {
+ return !(*this == other);
+ }
+
+ StrideIterator operator++() { // Value after modification.
+ ptr_ += stride_;
+ return *this;
+ }
+
+ StrideIterator operator++(int) {
+ auto temp = *this;
+ ptr_ += stride_;
+ return temp;
+ }
+
+ T& operator*() const {
+ return *reinterpret_cast<T*>(ptr_);
+ }
+
+ T* operator->() const {
+ return &**this;
+ }
+
+ private:
+ uintptr_t ptr_;
+ const size_t stride_;
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_STRIDE_ITERATOR_H_
diff --git a/runtime/thread.cc b/runtime/thread.cc
index b3b55c4..f37960f 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -33,6 +33,7 @@
#include "arch/context.h"
#include "art_field-inl.h"
+#include "art_method-inl.h"
#include "base/bit_utils.h"
#include "base/mutex.h"
#include "base/timing_logger.h"
@@ -50,7 +51,6 @@
#include "handle_scope-inl.h"
#include "indirect_reference_table-inl.h"
#include "jni_internal.h"
-#include "mirror/art_method-inl.h"
#include "mirror/class_loader.h"
#include "mirror/class-inl.h"
#include "mirror/object_array-inl.h"
@@ -756,7 +756,7 @@ bool Thread::RequestCheckpoint(Closure* function) {
union StateAndFlags new_state_and_flags;
new_state_and_flags.as_int = old_state_and_flags.as_int;
new_state_and_flags.as_struct.flags |= kCheckpointRequest;
- bool success =tls32_.state_and_flags.as_atomic_int.CompareExchangeStrongSequentiallyConsistent(
+ bool success = tls32_.state_and_flags.as_atomic_int.CompareExchangeStrongSequentiallyConsistent(
old_state_and_flags.as_int, new_state_and_flags.as_int);
if (UNLIKELY(!success)) {
// The thread changed state before the checkpoint was installed.
@@ -958,7 +958,7 @@ struct StackDumpVisitor : public StackVisitor {
}
bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod* m = GetMethod();
+ ArtMethod* m = GetMethod();
if (m->IsRuntimeMethod()) {
return true;
}
@@ -1029,7 +1029,7 @@ struct StackDumpVisitor : public StackVisitor {
std::ostream& os;
const Thread* thread;
const bool can_allocate;
- mirror::ArtMethod* last_method;
+ ArtMethod* last_method;
int last_line_number;
int repetition_count;
int frame_count;
@@ -1060,7 +1060,7 @@ static bool ShouldShowNativeStack(const Thread* thread)
// We don't just check kNative because native methods will be in state kSuspended if they're
// calling back into the VM, or kBlocked if they're blocked on a monitor, or one of the
// thread-startup states if it's early enough in their life cycle (http://b/7432159).
- mirror::ArtMethod* current_method = thread->GetCurrentMethod(nullptr);
+ ArtMethod* current_method = thread->GetCurrentMethod(nullptr);
return current_method != nullptr && current_method->IsNative();
}
@@ -1541,7 +1541,7 @@ class CountStackDepthVisitor : public StackVisitor {
// We want to skip frames up to and including the exception's constructor.
// Note we also skip the frame if it doesn't have a method (namely the callee
// save frame)
- mirror::ArtMethod* m = GetMethod();
+ ArtMethod* m = GetMethod();
if (skipping_ && !m->IsRuntimeMethod() &&
!mirror::Throwable::GetJavaLangThrowable()->IsAssignableFrom(m->GetDeclaringClass())) {
skipping_ = false;
@@ -1578,63 +1578,54 @@ class BuildInternalStackTraceVisitor : public StackVisitor {
self_(self),
skip_depth_(skip_depth),
count_(0),
- dex_pc_trace_(nullptr),
- method_trace_(nullptr) {}
+ trace_(nullptr),
+ pointer_size_(Runtime::Current()->GetClassLinker()->GetImagePointerSize()) {}
bool Init(int depth)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- // Allocate method trace with an extra slot that will hold the PC trace
- StackHandleScope<1> hs(self_);
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- Handle<mirror::ObjectArray<mirror::Object>> method_trace(
- hs.NewHandle(class_linker->AllocObjectArray<mirror::Object>(self_, depth + 1)));
- if (method_trace.Get() == nullptr) {
+ // Allocate method trace with format [method pointers][pcs].
+ auto* cl = Runtime::Current()->GetClassLinker();
+ trace_ = cl->AllocPointerArray(self_, depth * 2);
+ if (trace_ == nullptr) {
+ self_->AssertPendingOOMException();
return false;
}
- mirror::IntArray* dex_pc_trace = mirror::IntArray::Alloc(self_, depth);
- if (dex_pc_trace == nullptr) {
- return false;
- }
- // Save PC trace in last element of method trace, also places it into the
- // object graph.
- // We are called from native: use non-transactional mode.
- method_trace->Set<kTransactionActive>(depth, dex_pc_trace);
- // Set the Object*s and assert that no thread suspension is now possible.
+ // If we are called from native, use non-transactional mode.
const char* last_no_suspend_cause =
self_->StartAssertNoThreadSuspension("Building internal stack trace");
CHECK(last_no_suspend_cause == nullptr) << last_no_suspend_cause;
- method_trace_ = method_trace.Get();
- dex_pc_trace_ = dex_pc_trace;
return true;
}
virtual ~BuildInternalStackTraceVisitor() {
- if (method_trace_ != nullptr) {
+ if (trace_ != nullptr) {
self_->EndAssertNoThreadSuspension(nullptr);
}
}
bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (method_trace_ == nullptr || dex_pc_trace_ == nullptr) {
+ if (trace_ == nullptr) {
return true; // We're probably trying to fillInStackTrace for an OutOfMemoryError.
}
if (skip_depth_ > 0) {
skip_depth_--;
return true;
}
- mirror::ArtMethod* m = GetMethod();
+ ArtMethod* m = GetMethod();
if (m->IsRuntimeMethod()) {
return true; // Ignore runtime frames (in particular callee save).
}
- method_trace_->Set<kTransactionActive>(count_, m);
- dex_pc_trace_->Set<kTransactionActive>(count_,
- m->IsProxyMethod() ? DexFile::kDexNoIndex : GetDexPc());
+ trace_->SetElementPtrSize<kTransactionActive>(
+ count_, m, pointer_size_);
+ trace_->SetElementPtrSize<kTransactionActive>(
+ trace_->GetLength() / 2 + count_, m->IsProxyMethod() ? DexFile::kDexNoIndex : GetDexPc(),
+ pointer_size_);
++count_;
return true;
}
- mirror::ObjectArray<mirror::Object>* GetInternalStackTrace() const {
- return method_trace_;
+ mirror::PointerArray* GetInternalStackTrace() const {
+ return trace_;
}
private:
@@ -1643,10 +1634,10 @@ class BuildInternalStackTraceVisitor : public StackVisitor {
int32_t skip_depth_;
// Current position down stack trace.
uint32_t count_;
- // Array of dex PC values.
- mirror::IntArray* dex_pc_trace_;
- // An array of the methods on the stack, the last entry is a reference to the PC trace.
- mirror::ObjectArray<mirror::Object>* method_trace_;
+ // An array of the methods on the stack, the last entries are the dex PCs.
+ mirror::PointerArray* trace_;
+ // For cross compilation.
+ size_t pointer_size_;
};
template<bool kTransactionActive>
@@ -1665,13 +1656,16 @@ jobject Thread::CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable
return nullptr; // Allocation failed.
}
build_trace_visitor.WalkStack();
- mirror::ObjectArray<mirror::Object>* trace = build_trace_visitor.GetInternalStackTrace();
+ mirror::PointerArray* trace = build_trace_visitor.GetInternalStackTrace();
if (kIsDebugBuild) {
- for (int32_t i = 0; i < trace->GetLength(); ++i) {
- CHECK(trace->Get(i) != nullptr);
+ // Second half is dex PCs.
+ for (uint32_t i = 0; i < static_cast<uint32_t>(trace->GetLength() / 2); ++i) {
+ auto* method = trace->GetElementPtrSize<ArtMethod*>(
+ i, Runtime::Current()->GetClassLinker()->GetImagePointerSize());
+ CHECK(method != nullptr);
}
}
- return soa.AddLocalReference<jobjectArray>(trace);
+ return soa.AddLocalReference<jobject>(trace);
}
template jobject Thread::CreateInternalStackTrace<false>(
const ScopedObjectAccessAlreadyRunnable& soa) const;
@@ -1688,9 +1682,9 @@ jobjectArray Thread::InternalStackTraceToStackTraceElementArray(
const ScopedObjectAccessAlreadyRunnable& soa, jobject internal, jobjectArray output_array,
int* stack_depth) {
// Decode the internal stack trace into the depth, method trace and PC trace
- int32_t depth = soa.Decode<mirror::ObjectArray<mirror::Object>*>(internal)->GetLength() - 1;
+ int32_t depth = soa.Decode<mirror::PointerArray*>(internal)->GetLength() / 2;
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ auto* cl = Runtime::Current()->GetClassLinker();
jobjectArray result;
@@ -1704,7 +1698,7 @@ jobjectArray Thread::InternalStackTraceToStackTraceElementArray(
} else {
// Create java_trace array and place in local reference table
mirror::ObjectArray<mirror::StackTraceElement>* java_traces =
- class_linker->AllocStackTraceElementArray(soa.Self(), depth);
+ cl->AllocStackTraceElementArray(soa.Self(), depth);
if (java_traces == nullptr) {
return nullptr;
}
@@ -1716,10 +1710,11 @@ jobjectArray Thread::InternalStackTraceToStackTraceElementArray(
}
for (int32_t i = 0; i < depth; ++i) {
- mirror::ObjectArray<mirror::Object>* method_trace =
- soa.Decode<mirror::ObjectArray<mirror::Object>*>(internal);
+ auto* method_trace = soa.Decode<mirror::PointerArray*>(internal);
// Prepare parameters for StackTraceElement(String cls, String method, String file, int line)
- mirror::ArtMethod* method = down_cast<mirror::ArtMethod*>(method_trace->Get(i));
+ ArtMethod* method = method_trace->GetElementPtrSize<ArtMethod*>(i, sizeof(void*));
+ uint32_t dex_pc = method_trace->GetElementPtrSize<uint32_t>(
+ i + method_trace->GetLength() / 2, sizeof(void*));
int32_t line_number;
StackHandleScope<3> hs(soa.Self());
auto class_name_object(hs.NewHandle<mirror::String>(nullptr));
@@ -1729,27 +1724,28 @@ jobjectArray Thread::InternalStackTraceToStackTraceElementArray(
class_name_object.Assign(method->GetDeclaringClass()->GetName());
// source_name_object intentionally left null for proxy methods
} else {
- mirror::IntArray* pc_trace = down_cast<mirror::IntArray*>(method_trace->Get(depth));
- uint32_t dex_pc = pc_trace->Get(i);
line_number = method->GetLineNumFromDexPC(dex_pc);
// Allocate element, potentially triggering GC
// TODO: reuse class_name_object via Class::name_?
const char* descriptor = method->GetDeclaringClassDescriptor();
CHECK(descriptor != nullptr);
std::string class_name(PrettyDescriptor(descriptor));
- class_name_object.Assign(mirror::String::AllocFromModifiedUtf8(soa.Self(), class_name.c_str()));
+ class_name_object.Assign(
+ mirror::String::AllocFromModifiedUtf8(soa.Self(), class_name.c_str()));
if (class_name_object.Get() == nullptr) {
+ soa.Self()->AssertPendingOOMException();
return nullptr;
}
const char* source_file = method->GetDeclaringClassSourceFile();
if (source_file != nullptr) {
source_name_object.Assign(mirror::String::AllocFromModifiedUtf8(soa.Self(), source_file));
if (source_name_object.Get() == nullptr) {
+ soa.Self()->AssertPendingOOMException();
return nullptr;
}
}
}
- const char* method_name = method->GetName();
+ const char* method_name = method->GetInterfaceMethodIfProxy(sizeof(void*))->GetName();
CHECK(method_name != nullptr);
Handle<mirror::String> method_name_object(
hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), method_name)));
@@ -1790,7 +1786,7 @@ void Thread::ThrowNewException(const char* exception_class_descriptor,
static mirror::ClassLoader* GetCurrentClassLoader(Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod* method = self->GetCurrentMethod(nullptr);
+ ArtMethod* method = self->GetCurrentMethod(nullptr);
return method != nullptr
? method->GetDeclaringClass()->GetClassLoader()
: nullptr;
@@ -1805,9 +1801,9 @@ void Thread::ThrowNewWrappedException(const char* exception_class_descriptor,
ScopedLocalRef<jobject> cause(GetJniEnv(), soa.AddLocalReference<jobject>(GetException()));
ClearException();
Runtime* runtime = Runtime::Current();
+ auto* cl = runtime->GetClassLinker();
Handle<mirror::Class> exception_class(
- hs.NewHandle(runtime->GetClassLinker()->FindClass(this, exception_class_descriptor,
- class_loader)));
+ hs.NewHandle(cl->FindClass(this, exception_class_descriptor, class_loader)));
if (UNLIKELY(exception_class.Get() == nullptr)) {
CHECK(IsExceptionPending());
LOG(ERROR) << "No exception class " << PrettyDescriptor(exception_class_descriptor);
@@ -1852,8 +1848,8 @@ void Thread::ThrowNewWrappedException(const char* exception_class_descriptor,
signature = "(Ljava/lang/Throwable;)V";
}
}
- mirror::ArtMethod* exception_init_method =
- exception_class->FindDeclaredDirectMethod("<init>", signature);
+ ArtMethod* exception_init_method =
+ exception_class->FindDeclaredDirectMethod("<init>", signature, cl->GetImagePointerSize());
CHECK(exception_init_method != nullptr) << "No <init>" << signature << " in "
<< PrettyDescriptor(exception_class_descriptor);
@@ -2129,7 +2125,7 @@ struct CurrentMethodVisitor FINAL : public StackVisitor {
dex_pc_(0),
abort_on_error_(abort_on_error) {}
bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod* m = GetMethod();
+ ArtMethod* m = GetMethod();
if (m->IsRuntimeMethod()) {
// Continue if this is a runtime method.
return true;
@@ -2142,12 +2138,12 @@ struct CurrentMethodVisitor FINAL : public StackVisitor {
return false;
}
mirror::Object* this_object_;
- mirror::ArtMethod* method_;
+ ArtMethod* method_;
uint32_t dex_pc_;
const bool abort_on_error_;
};
-mirror::ArtMethod* Thread::GetCurrentMethod(uint32_t* dex_pc, bool abort_on_error) const {
+ArtMethod* Thread::GetCurrentMethod(uint32_t* dex_pc, bool abort_on_error) const {
CurrentMethodVisitor visitor(const_cast<Thread*>(this), nullptr, abort_on_error);
visitor.WalkStack(false);
if (dex_pc != nullptr) {
@@ -2189,9 +2185,7 @@ class ReferenceMapVisitor : public StackVisitor {
}
void VisitShadowFrame(ShadowFrame* shadow_frame) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod** method_addr = shadow_frame->GetMethodAddress();
- visitor_(reinterpret_cast<mirror::Object**>(method_addr), 0 /*ignored*/, this);
- mirror::ArtMethod* m = *method_addr;
+ ArtMethod* m = shadow_frame->GetMethod();
DCHECK(m != nullptr);
size_t num_regs = shadow_frame->NumberOfVRegs();
if (m->IsNative() || shadow_frame->HasReferenceArray()) {
@@ -2233,17 +2227,15 @@ class ReferenceMapVisitor : public StackVisitor {
private:
void VisitQuickFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- StackReference<mirror::ArtMethod>* cur_quick_frame = GetCurrentQuickFrame();
- mirror::ArtMethod* m = cur_quick_frame->AsMirrorPtr();
- mirror::ArtMethod* old_method = m;
- visitor_(reinterpret_cast<mirror::Object**>(&m), 0 /*ignored*/, this);
- if (m != old_method) {
- cur_quick_frame->Assign(m);
- }
+ auto* cur_quick_frame = GetCurrentQuickFrame();
+ DCHECK(cur_quick_frame != nullptr);
+ auto* m = *cur_quick_frame;
// Process register map (which native and runtime methods don't have)
if (!m->IsNative() && !m->IsRuntimeMethod() && !m->IsProxyMethod()) {
if (m->IsOptimized(sizeof(void*))) {
+ auto* vreg_base = reinterpret_cast<StackReference<mirror::Object>*>(
+ reinterpret_cast<uintptr_t>(cur_quick_frame));
Runtime* runtime = Runtime::Current();
const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(m, sizeof(void*));
uintptr_t native_pc_offset = m->NativeQuickPcOffset(GetCurrentQuickFramePc(), entry_point);
@@ -2253,8 +2245,7 @@ class ReferenceMapVisitor : public StackVisitor {
// Visit stack entries that hold pointers.
for (size_t i = 0; i < mask.size_in_bits(); ++i) {
if (mask.LoadBit(i)) {
- StackReference<mirror::Object>* ref_addr =
- reinterpret_cast<StackReference<mirror::Object>*>(cur_quick_frame) + i;
+ auto* ref_addr = vreg_base + i;
mirror::Object* ref = ref_addr->AsMirrorPtr();
if (ref != nullptr) {
mirror::Object* new_ref = ref;
@@ -2290,7 +2281,7 @@ class ReferenceMapVisitor : public StackVisitor {
uintptr_t native_pc_offset = m->NativeQuickPcOffset(GetCurrentQuickFramePc(), entry_point);
const uint8_t* reg_bitmap = map.FindBitMap(native_pc_offset);
DCHECK(reg_bitmap != nullptr);
- const void* code_pointer = mirror::ArtMethod::EntryPointToCodePointer(entry_point);
+ const void* code_pointer = ArtMethod::EntryPointToCodePointer(entry_point);
const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*)));
QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
// For all dex registers in the bitmap
@@ -2361,9 +2352,6 @@ void Thread::VisitRoots(RootVisitor* visitor) {
if (tlsPtr_.debug_invoke_req != nullptr) {
tlsPtr_.debug_invoke_req->VisitRoots(visitor, RootInfo(kRootDebugger, thread_id));
}
- if (tlsPtr_.single_step_control != nullptr) {
- tlsPtr_.single_step_control->VisitRoots(visitor, RootInfo(kRootDebugger, thread_id));
- }
if (tlsPtr_.deoptimization_shadow_frame != nullptr) {
RootCallbackVisitor visitor_to_callback(visitor, thread_id);
ReferenceMapVisitor<RootCallbackVisitor> mapper(this, nullptr, visitor_to_callback);
@@ -2392,9 +2380,6 @@ void Thread::VisitRoots(RootVisitor* visitor) {
ReleaseLongJumpContext(context);
for (instrumentation::InstrumentationStackFrame& frame : *GetInstrumentationStack()) {
visitor->VisitRootIfNonNull(&frame.this_object_, RootInfo(kRootVMInternal, thread_id));
- DCHECK(frame.method_ != nullptr);
- visitor->VisitRoot(reinterpret_cast<mirror::Object**>(&frame.method_),
- RootInfo(kRootVMInternal, thread_id));
}
}
diff --git a/runtime/thread.h b/runtime/thread.h
index 96e0916..8c2e215 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -52,7 +52,6 @@ namespace collector {
} // namespace gc
namespace mirror {
- class ArtMethod;
class Array;
class Class;
class ClassLoader;
@@ -69,6 +68,7 @@ namespace verifier {
class MethodVerifier;
} // namespace verifier
+class ArtMethod;
class BaseMutex;
class ClassLinker;
class Closure;
@@ -374,7 +374,7 @@ class Thread {
// Get the current method and dex pc. If there are errors in retrieving the dex pc, this will
// abort the runtime iff abort_on_error is true.
- mirror::ArtMethod* GetCurrentMethod(uint32_t* dex_pc, bool abort_on_error = true) const
+ ArtMethod* GetCurrentMethod(uint32_t* dex_pc, bool abort_on_error = true) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Returns whether the given exception was thrown by the current Java method being executed
@@ -382,7 +382,7 @@ class Thread {
bool IsExceptionThrownByCurrentMethod(mirror::Throwable* exception) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SetTopOfStack(StackReference<mirror::ArtMethod>* top_method) {
+ void SetTopOfStack(ArtMethod** top_method) {
tlsPtr_.managed_stack.SetTopQuickFrame(top_method);
}
@@ -810,11 +810,11 @@ class Thread {
return tlsPtr_.instrumentation_stack;
}
- std::vector<mirror::ArtMethod*>* GetStackTraceSample() const {
+ std::vector<ArtMethod*>* GetStackTraceSample() const {
return tlsPtr_.stack_trace_sample;
}
- void SetStackTraceSample(std::vector<mirror::ArtMethod*>* sample) {
+ void SetStackTraceSample(std::vector<ArtMethod*>* sample) {
tlsPtr_.stack_trace_sample = sample;
}
@@ -1161,7 +1161,7 @@ class Thread {
size_t stack_size;
// Pointer to previous stack trace captured by sampling profiler.
- std::vector<mirror::ArtMethod*>* stack_trace_sample;
+ std::vector<ArtMethod*>* stack_trace_sample;
// The next thread in the wait set this thread is part of or null if not waiting.
Thread* wait_next;
diff --git a/runtime/trace.cc b/runtime/trace.cc
index f874716..d3b3af8 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -22,6 +22,7 @@
#define ATRACE_TAG ATRACE_TAG_DALVIK
#include "cutils/trace.h"
+#include "art_method-inl.h"
#include "base/casts.h"
#include "base/stl_util.h"
#include "base/time_utils.h"
@@ -31,7 +32,6 @@
#include "debugger.h"
#include "dex_file-inl.h"
#include "instrumentation.h"
-#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache-inl.h"
#include "mirror/object_array-inl.h"
@@ -98,7 +98,7 @@ class BuildStackTraceVisitor : public StackVisitor {
method_trace_(Trace::AllocStackTrace()) {}
bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod* m = GetMethod();
+ ArtMethod* m = GetMethod();
// Ignore runtime frames (in particular callee save).
if (!m->IsRuntimeMethod()) {
method_trace_->push_back(m);
@@ -107,12 +107,12 @@ class BuildStackTraceVisitor : public StackVisitor {
}
// Returns a stack trace where the topmost frame corresponds with the first element of the vector.
- std::vector<mirror::ArtMethod*>* GetStackTrace() const {
+ std::vector<ArtMethod*>* GetStackTrace() const {
return method_trace_;
}
private:
- std::vector<mirror::ArtMethod*>* const method_trace_;
+ std::vector<ArtMethod*>* const method_trace_;
};
static const char kTraceTokenChar = '*';
@@ -120,42 +120,41 @@ static const uint16_t kTraceHeaderLength = 32;
static const uint32_t kTraceMagicValue = 0x574f4c53;
static const uint16_t kTraceVersionSingleClock = 2;
static const uint16_t kTraceVersionDualClock = 3;
-static const uint16_t kTraceRecordSizeSingleClock = 10; // using v2
-static const uint16_t kTraceRecordSizeDualClock = 14; // using v3 with two timestamps
+static const uint16_t kTraceRecordSizeSingleClock = 14; // using v2
+static const uint16_t kTraceRecordSizeDualClock = 18; // using v3 with two timestamps
TraceClockSource Trace::default_clock_source_ = kDefaultTraceClockSource;
Trace* volatile Trace::the_trace_ = nullptr;
pthread_t Trace::sampling_pthread_ = 0U;
-std::unique_ptr<std::vector<mirror::ArtMethod*>> Trace::temp_stack_trace_;
+std::unique_ptr<std::vector<ArtMethod*>> Trace::temp_stack_trace_;
// The key identifying the tracer to update instrumentation.
static constexpr const char* kTracerInstrumentationKey = "Tracer";
-static mirror::ArtMethod* DecodeTraceMethodId(uint32_t tmid) {
- return reinterpret_cast<mirror::ArtMethod*>(tmid & ~kTraceMethodActionMask);
+static ArtMethod* DecodeTraceMethodId(uint64_t tmid) {
+ return reinterpret_cast<ArtMethod*>(tmid & ~kTraceMethodActionMask);
}
static TraceAction DecodeTraceAction(uint32_t tmid) {
return static_cast<TraceAction>(tmid & kTraceMethodActionMask);
}
-static uint32_t EncodeTraceMethodAndAction(mirror::ArtMethod* method,
- TraceAction action) {
- uint32_t tmid = PointerToLowMemUInt32(method) | action;
+static uint64_t EncodeTraceMethodAndAction(ArtMethod* method, TraceAction action) {
+ auto tmid = static_cast<uint64_t>(reinterpret_cast<uintptr_t>(method)) | action;
DCHECK_EQ(method, DecodeTraceMethodId(tmid));
return tmid;
}
-std::vector<mirror::ArtMethod*>* Trace::AllocStackTrace() {
+std::vector<ArtMethod*>* Trace::AllocStackTrace() {
if (temp_stack_trace_.get() != nullptr) {
return temp_stack_trace_.release();
} else {
- return new std::vector<mirror::ArtMethod*>();
+ return new std::vector<ArtMethod*>();
}
}
-void Trace::FreeStackTrace(std::vector<mirror::ArtMethod*>* stack_trace) {
+void Trace::FreeStackTrace(std::vector<ArtMethod*>* stack_trace) {
stack_trace->clear();
temp_stack_trace_.reset(stack_trace);
}
@@ -248,22 +247,22 @@ static void Append8LE(uint8_t* buf, uint64_t val) {
static void GetSample(Thread* thread, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
BuildStackTraceVisitor build_trace_visitor(thread);
build_trace_visitor.WalkStack();
- std::vector<mirror::ArtMethod*>* stack_trace = build_trace_visitor.GetStackTrace();
+ std::vector<ArtMethod*>* stack_trace = build_trace_visitor.GetStackTrace();
Trace* the_trace = reinterpret_cast<Trace*>(arg);
the_trace->CompareAndUpdateStackTrace(thread, stack_trace);
}
static void ClearThreadStackTraceAndClockBase(Thread* thread, void* arg ATTRIBUTE_UNUSED) {
thread->SetTraceClockBase(0);
- std::vector<mirror::ArtMethod*>* stack_trace = thread->GetStackTraceSample();
+ std::vector<ArtMethod*>* stack_trace = thread->GetStackTraceSample();
thread->SetStackTraceSample(nullptr);
delete stack_trace;
}
void Trace::CompareAndUpdateStackTrace(Thread* thread,
- std::vector<mirror::ArtMethod*>* stack_trace) {
+ std::vector<ArtMethod*>* stack_trace) {
CHECK_EQ(pthread_self(), sampling_pthread_);
- std::vector<mirror::ArtMethod*>* old_stack_trace = thread->GetStackTraceSample();
+ std::vector<ArtMethod*>* old_stack_trace = thread->GetStackTraceSample();
// Update the thread's stack trace sample.
thread->SetStackTraceSample(stack_trace);
// Read timer clocks to use for all events in this trace.
@@ -273,7 +272,7 @@ void Trace::CompareAndUpdateStackTrace(Thread* thread,
if (old_stack_trace == nullptr) {
// If there's no previous stack trace sample for this thread, log an entry event for all
// methods in the trace.
- for (std::vector<mirror::ArtMethod*>::reverse_iterator rit = stack_trace->rbegin();
+ for (std::vector<ArtMethod*>::reverse_iterator rit = stack_trace->rbegin();
rit != stack_trace->rend(); ++rit) {
LogMethodTraceEvent(thread, *rit, instrumentation::Instrumentation::kMethodEntered,
thread_clock_diff, wall_clock_diff);
@@ -281,15 +280,15 @@ void Trace::CompareAndUpdateStackTrace(Thread* thread,
} else {
// If there's a previous stack trace for this thread, diff the traces and emit entry and exit
// events accordingly.
- std::vector<mirror::ArtMethod*>::reverse_iterator old_rit = old_stack_trace->rbegin();
- std::vector<mirror::ArtMethod*>::reverse_iterator rit = stack_trace->rbegin();
+ std::vector<ArtMethod*>::reverse_iterator old_rit = old_stack_trace->rbegin();
+ std::vector<ArtMethod*>::reverse_iterator rit = stack_trace->rbegin();
// Iterate bottom-up over both traces until there's a difference between them.
while (old_rit != old_stack_trace->rend() && rit != stack_trace->rend() && *old_rit == *rit) {
old_rit++;
rit++;
}
// Iterate top-down over the old trace until the point where they differ, emitting exit events.
- for (std::vector<mirror::ArtMethod*>::iterator old_it = old_stack_trace->begin();
+ for (std::vector<ArtMethod*>::iterator old_it = old_stack_trace->begin();
old_it != old_rit.base(); ++old_it) {
LogMethodTraceEvent(thread, *old_it, instrumentation::Instrumentation::kMethodExited,
thread_clock_diff, wall_clock_diff);
@@ -640,14 +639,22 @@ Trace::~Trace() {
delete streaming_lock_;
}
+static uint64_t ReadBytes(uint8_t* buf, size_t bytes) {
+ uint64_t ret = 0;
+ for (size_t i = 0; i < bytes; ++i) {
+ ret |= static_cast<uint64_t>(buf[i]) << (i * 8);
+ }
+ return ret;
+}
+
static void DumpBuf(uint8_t* buf, size_t buf_size, TraceClockSource clock_source)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
uint8_t* ptr = buf + kTraceHeaderLength;
uint8_t* end = buf + buf_size;
while (ptr < end) {
- uint32_t tmid = ptr[2] | (ptr[3] << 8) | (ptr[4] << 16) | (ptr[5] << 24);
- mirror::ArtMethod* method = DecodeTraceMethodId(tmid);
+ uint64_t tmid = ReadBytes(ptr + 2, sizeof(tmid));
+ ArtMethod* method = DecodeTraceMethodId(tmid);
TraceAction action = DecodeTraceAction(tmid);
LOG(INFO) << PrettyMethod(method) << " " << static_cast<int>(action);
ptr += GetRecordSize(clock_source);
@@ -656,12 +663,12 @@ static void DumpBuf(uint8_t* buf, size_t buf_size, TraceClockSource clock_source
static void GetVisitedMethodsFromBitSets(
const std::map<mirror::DexCache*, DexIndexBitSet*>& seen_methods,
- std::set<mirror::ArtMethod*>* visited_methods) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ std::set<ArtMethod*>* visited_methods) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
for (auto& e : seen_methods) {
DexIndexBitSet* bit_set = e.second;
for (uint32_t i = 0; i < bit_set->size(); ++i) {
if ((*bit_set)[i]) {
- visited_methods->insert(e.first->GetResolvedMethod(i));
+ visited_methods->insert(e.first->GetResolvedMethod(i, sizeof(void*)));
}
}
}
@@ -670,7 +677,7 @@ static void GetVisitedMethodsFromBitSets(
void Trace::FinishTracing() {
size_t final_offset = 0;
- std::set<mirror::ArtMethod*> visited_methods;
+ std::set<ArtMethod*> visited_methods;
if (trace_output_mode_ == TraceOutputMode::kStreaming) {
// Write the secondary file with all the method names.
GetVisitedMethodsFromBitSets(seen_methods_, &visited_methods);
@@ -761,14 +768,14 @@ void Trace::FinishTracing() {
}
void Trace::DexPcMoved(Thread* thread, mirror::Object* this_object,
- mirror::ArtMethod* method, uint32_t new_dex_pc) {
+ ArtMethod* method, uint32_t new_dex_pc) {
UNUSED(thread, this_object, method, new_dex_pc);
// We're not recorded to listen to this kind of event, so complain.
LOG(ERROR) << "Unexpected dex PC event in tracing " << PrettyMethod(method) << " " << new_dex_pc;
}
void Trace::FieldRead(Thread* thread, mirror::Object* this_object,
- mirror::ArtMethod* method, uint32_t dex_pc, ArtField* field)
+ ArtMethod* method, uint32_t dex_pc, ArtField* field)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
UNUSED(thread, this_object, method, dex_pc, field);
// We're not recorded to listen to this kind of event, so complain.
@@ -776,7 +783,7 @@ void Trace::FieldRead(Thread* thread, mirror::Object* this_object,
}
void Trace::FieldWritten(Thread* thread, mirror::Object* this_object,
- mirror::ArtMethod* method, uint32_t dex_pc, ArtField* field,
+ ArtMethod* method, uint32_t dex_pc, ArtField* field,
const JValue& field_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
UNUSED(thread, this_object, method, dex_pc, field, field_value);
@@ -785,7 +792,7 @@ void Trace::FieldWritten(Thread* thread, mirror::Object* this_object,
}
void Trace::MethodEntered(Thread* thread, mirror::Object* this_object ATTRIBUTE_UNUSED,
- mirror::ArtMethod* method, uint32_t dex_pc ATTRIBUTE_UNUSED) {
+ ArtMethod* method, uint32_t dex_pc ATTRIBUTE_UNUSED) {
uint32_t thread_clock_diff = 0;
uint32_t wall_clock_diff = 0;
ReadClocks(thread, &thread_clock_diff, &wall_clock_diff);
@@ -794,7 +801,7 @@ void Trace::MethodEntered(Thread* thread, mirror::Object* this_object ATTRIBUTE_
}
void Trace::MethodExited(Thread* thread, mirror::Object* this_object ATTRIBUTE_UNUSED,
- mirror::ArtMethod* method, uint32_t dex_pc ATTRIBUTE_UNUSED,
+ ArtMethod* method, uint32_t dex_pc ATTRIBUTE_UNUSED,
const JValue& return_value ATTRIBUTE_UNUSED) {
uint32_t thread_clock_diff = 0;
uint32_t wall_clock_diff = 0;
@@ -804,7 +811,7 @@ void Trace::MethodExited(Thread* thread, mirror::Object* this_object ATTRIBUTE_U
}
void Trace::MethodUnwind(Thread* thread, mirror::Object* this_object ATTRIBUTE_UNUSED,
- mirror::ArtMethod* method, uint32_t dex_pc ATTRIBUTE_UNUSED) {
+ ArtMethod* method, uint32_t dex_pc ATTRIBUTE_UNUSED) {
uint32_t thread_clock_diff = 0;
uint32_t wall_clock_diff = 0;
ReadClocks(thread, &thread_clock_diff, &wall_clock_diff);
@@ -818,7 +825,7 @@ void Trace::ExceptionCaught(Thread* thread, mirror::Throwable* exception_object)
LOG(ERROR) << "Unexpected exception caught event in tracing";
}
-void Trace::BackwardBranch(Thread* /*thread*/, mirror::ArtMethod* method,
+void Trace::BackwardBranch(Thread* /*thread*/, ArtMethod* method,
int32_t /*dex_pc_offset*/)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
LOG(ERROR) << "Unexpected backward branch event in tracing" << PrettyMethod(method);
@@ -840,11 +847,12 @@ void Trace::ReadClocks(Thread* thread, uint32_t* thread_clock_diff, uint32_t* wa
}
}
-bool Trace::RegisterMethod(mirror::ArtMethod* method) {
+bool Trace::RegisterMethod(ArtMethod* method) {
mirror::DexCache* dex_cache = method->GetDexCache();
- if (dex_cache->GetResolvedMethod(method->GetDexMethodIndex()) != method) {
- DCHECK(dex_cache->GetResolvedMethod(method->GetDexMethodIndex()) == nullptr);
- dex_cache->SetResolvedMethod(method->GetDexMethodIndex(), method);
+ auto* resolved_method = dex_cache->GetResolvedMethod(method->GetDexMethodIndex(), sizeof(void*));
+ if (resolved_method != method) {
+ DCHECK(resolved_method == nullptr);
+ dex_cache->SetResolvedMethod(method->GetDexMethodIndex(), method, sizeof(void*));
}
if (seen_methods_.find(dex_cache) == seen_methods_.end()) {
seen_methods_.insert(std::make_pair(dex_cache, new DexIndexBitSet()));
@@ -869,8 +877,9 @@ bool Trace::RegisterThread(Thread* thread) {
return false;
}
-static std::string GetMethodLine(mirror::ArtMethod* method)
+static std::string GetMethodLine(ArtMethod* method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ method = method->GetInterfaceMethodIfProxy(sizeof(void*));
return StringPrintf("%p\t%s\t%s\t%s\t%s\n", method,
PrettyDescriptor(method->GetDeclaringClassDescriptor()).c_str(), method->GetName(),
method->GetSignature().ToString().c_str(), method->GetDeclaringClassSourceFile());
@@ -902,7 +911,7 @@ void Trace::WriteToBuf(const uint8_t* src, size_t src_size) {
memcpy(buf_.get() + old_offset, src, src_size);
}
-void Trace::LogMethodTraceEvent(Thread* thread, mirror::ArtMethod* method,
+void Trace::LogMethodTraceEvent(Thread* thread, ArtMethod* method,
instrumentation::Instrumentation::InstrumentationEvent event,
uint32_t thread_clock_diff, uint32_t wall_clock_diff) {
// Advance cur_offset_ atomically.
@@ -936,11 +945,11 @@ void Trace::LogMethodTraceEvent(Thread* thread, mirror::ArtMethod* method,
UNIMPLEMENTED(FATAL) << "Unexpected event: " << event;
}
- uint32_t method_value = EncodeTraceMethodAndAction(method, action);
+ uint64_t method_value = EncodeTraceMethodAndAction(method, action);
// Write data
uint8_t* ptr;
- static constexpr size_t kPacketSize = 14U; // The maximum size of data in a packet.
+ static constexpr size_t kPacketSize = 18U; // The maximum size of data in a packet.
uint8_t stack_buf[kPacketSize]; // Space to store a packet when in streaming mode.
if (trace_output_mode_ == TraceOutputMode::kStreaming) {
ptr = stack_buf;
@@ -949,8 +958,8 @@ void Trace::LogMethodTraceEvent(Thread* thread, mirror::ArtMethod* method,
}
Append2LE(ptr, thread->GetTid());
- Append4LE(ptr + 2, method_value);
- ptr += 6;
+ Append8LE(ptr + 2, method_value);
+ ptr += 10;
if (UseThreadCpuClock()) {
Append4LE(ptr, thread_clock_diff);
@@ -959,7 +968,7 @@ void Trace::LogMethodTraceEvent(Thread* thread, mirror::ArtMethod* method,
if (UseWallClock()) {
Append4LE(ptr, wall_clock_diff);
}
- static_assert(kPacketSize == 2 + 4 + 4 + 4, "Packet size incorrect.");
+ static_assert(kPacketSize == 2 + 8 + 4 + 4, "Packet size incorrect.");
if (trace_output_mode_ == TraceOutputMode::kStreaming) {
MutexLock mu(Thread::Current(), *streaming_lock_); // To serialize writing.
@@ -990,19 +999,19 @@ void Trace::LogMethodTraceEvent(Thread* thread, mirror::ArtMethod* method,
}
void Trace::GetVisitedMethods(size_t buf_size,
- std::set<mirror::ArtMethod*>* visited_methods) {
+ std::set<ArtMethod*>* visited_methods) {
uint8_t* ptr = buf_.get() + kTraceHeaderLength;
uint8_t* end = buf_.get() + buf_size;
while (ptr < end) {
- uint32_t tmid = ptr[2] | (ptr[3] << 8) | (ptr[4] << 16) | (ptr[5] << 24);
- mirror::ArtMethod* method = DecodeTraceMethodId(tmid);
+ uint64_t tmid = ReadBytes(ptr + 2, sizeof(tmid));
+ ArtMethod* method = DecodeTraceMethodId(tmid);
visited_methods->insert(method);
ptr += GetRecordSize(clock_source_);
}
}
-void Trace::DumpMethodList(std::ostream& os, const std::set<mirror::ArtMethod*>& visited_methods) {
+void Trace::DumpMethodList(std::ostream& os, const std::set<ArtMethod*>& visited_methods) {
for (const auto& method : visited_methods) {
os << GetMethodLine(method);
}
diff --git a/runtime/trace.h b/runtime/trace.h
index df6d5e7..1539c06 100644
--- a/runtime/trace.h
+++ b/runtime/trace.h
@@ -35,11 +35,11 @@
namespace art {
namespace mirror {
- class ArtMethod;
class DexCache;
} // namespace mirror
class ArtField;
+class ArtMethod;
class Thread;
using DexIndexBitSet = std::bitset<65536>;
@@ -99,38 +99,38 @@ class Trace FINAL : public instrumentation::InstrumentationListener {
void MeasureClockOverhead();
uint32_t GetClockOverheadNanoSeconds();
- void CompareAndUpdateStackTrace(Thread* thread, std::vector<mirror::ArtMethod*>* stack_trace)
+ void CompareAndUpdateStackTrace(Thread* thread, std::vector<ArtMethod*>* stack_trace)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// InstrumentationListener implementation.
void MethodEntered(Thread* thread, mirror::Object* this_object,
- mirror::ArtMethod* method, uint32_t dex_pc)
+ ArtMethod* method, uint32_t dex_pc)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
void MethodExited(Thread* thread, mirror::Object* this_object,
- mirror::ArtMethod* method, uint32_t dex_pc,
+ ArtMethod* method, uint32_t dex_pc,
const JValue& return_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
void MethodUnwind(Thread* thread, mirror::Object* this_object,
- mirror::ArtMethod* method, uint32_t dex_pc)
+ ArtMethod* method, uint32_t dex_pc)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
void DexPcMoved(Thread* thread, mirror::Object* this_object,
- mirror::ArtMethod* method, uint32_t new_dex_pc)
+ ArtMethod* method, uint32_t new_dex_pc)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
void FieldRead(Thread* thread, mirror::Object* this_object,
- mirror::ArtMethod* method, uint32_t dex_pc, ArtField* field)
+ ArtMethod* method, uint32_t dex_pc, ArtField* field)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
void FieldWritten(Thread* thread, mirror::Object* this_object,
- mirror::ArtMethod* method, uint32_t dex_pc, ArtField* field,
+ ArtMethod* method, uint32_t dex_pc, ArtField* field,
const JValue& field_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
void ExceptionCaught(Thread* thread, mirror::Throwable* exception_object)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
- void BackwardBranch(Thread* thread, mirror::ArtMethod* method, int32_t dex_pc_offset)
+ void BackwardBranch(Thread* thread, ArtMethod* method, int32_t dex_pc_offset)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
// Reuse an old stack trace if it exists, otherwise allocate a new one.
- static std::vector<mirror::ArtMethod*>* AllocStackTrace();
+ static std::vector<ArtMethod*>* AllocStackTrace();
// Clear and store an old stack trace for later use.
- static void FreeStackTrace(std::vector<mirror::ArtMethod*>* stack_trace);
+ static void FreeStackTrace(std::vector<ArtMethod*>* stack_trace);
// Save id and name of a thread before it exits.
static void StoreExitingThreadInfo(Thread* thread);
@@ -150,20 +150,20 @@ class Trace FINAL : public instrumentation::InstrumentationListener {
void ReadClocks(Thread* thread, uint32_t* thread_clock_diff, uint32_t* wall_clock_diff);
- void LogMethodTraceEvent(Thread* thread, mirror::ArtMethod* method,
+ void LogMethodTraceEvent(Thread* thread, ArtMethod* method,
instrumentation::Instrumentation::InstrumentationEvent event,
uint32_t thread_clock_diff, uint32_t wall_clock_diff)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Methods to output traced methods and threads.
- void GetVisitedMethods(size_t end_offset, std::set<mirror::ArtMethod*>* visited_methods);
- void DumpMethodList(std::ostream& os, const std::set<mirror::ArtMethod*>& visited_methods)
+ void GetVisitedMethods(size_t end_offset, std::set<ArtMethod*>* visited_methods);
+ void DumpMethodList(std::ostream& os, const std::set<ArtMethod*>& visited_methods)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void DumpThreadList(std::ostream& os) LOCKS_EXCLUDED(Locks::thread_list_lock_);
// Methods to register seen entitites in streaming mode. The methods return true if the entity
// is newly discovered.
- bool RegisterMethod(mirror::ArtMethod* method)
+ bool RegisterMethod(ArtMethod* method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(streaming_lock_);
bool RegisterThread(Thread* thread)
EXCLUSIVE_LOCKS_REQUIRED(streaming_lock_);
@@ -183,7 +183,7 @@ class Trace FINAL : public instrumentation::InstrumentationListener {
static pthread_t sampling_pthread_;
// Used to remember an unused stack trace to avoid re-allocation during sampling.
- static std::unique_ptr<std::vector<mirror::ArtMethod*>> temp_stack_trace_;
+ static std::unique_ptr<std::vector<ArtMethod*>> temp_stack_trace_;
// File to write trace data out to, null if direct to ddms.
std::unique_ptr<File> trace_file_;
diff --git a/runtime/transaction.cc b/runtime/transaction.cc
index ab821d7..d91860b 100644
--- a/runtime/transaction.cc
+++ b/runtime/transaction.cc
@@ -20,6 +20,7 @@
#include "base/logging.h"
#include "gc/accounting/card_table-inl.h"
#include "intern_table.h"
+#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
diff --git a/runtime/transaction_test.cc b/runtime/transaction_test.cc
index aee2c54..8279a26 100644
--- a/runtime/transaction_test.cc
+++ b/runtime/transaction_test.cc
@@ -17,10 +17,10 @@
#include "transaction.h"
#include "art_field-inl.h"
+#include "art_method-inl.h"
#include "class_linker-inl.h"
#include "common_runtime_test.h"
#include "mirror/array-inl.h"
-#include "mirror/art_method-inl.h"
#include "scoped_thread_state_change.h"
namespace art {
diff --git a/runtime/utils.cc b/runtime/utils.cc
index 2671b46..4923342 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -26,10 +26,10 @@
#include <memory>
#include "art_field-inl.h"
+#include "art_method-inl.h"
#include "base/stl_util.h"
#include "base/unix_file/fd_file.h"
#include "dex_file-inl.h"
-#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/object-inl.h"
@@ -344,10 +344,13 @@ std::string PrettyReturnType(const char* signature) {
return PrettyDescriptor(return_type);
}
-std::string PrettyMethod(mirror::ArtMethod* m, bool with_signature) {
+std::string PrettyMethod(ArtMethod* m, bool with_signature) {
if (m == nullptr) {
return "null";
}
+ if (!m->IsRuntimeMethod()) {
+ m = m->GetInterfaceMethodIfProxy(Runtime::Current()->GetClassLinker()->GetImagePointerSize());
+ }
std::string result(PrettyDescriptor(m->GetDeclaringClassDescriptor()));
result += '.';
result += m->GetName();
@@ -595,7 +598,7 @@ std::string DescriptorToName(const char* descriptor) {
return descriptor;
}
-std::string JniShortName(mirror::ArtMethod* m) {
+std::string JniShortName(ArtMethod* m) {
std::string class_name(m->GetDeclaringClassDescriptor());
// Remove the leading 'L' and trailing ';'...
CHECK_EQ(class_name[0], 'L') << class_name;
@@ -613,7 +616,7 @@ std::string JniShortName(mirror::ArtMethod* m) {
return short_name;
}
-std::string JniLongName(mirror::ArtMethod* m) {
+std::string JniLongName(ArtMethod* m) {
std::string long_name;
long_name += JniShortName(m);
long_name += "__";
@@ -1088,7 +1091,7 @@ static void Addr2line(const std::string& map_src, uintptr_t offset, std::ostream
#endif
void DumpNativeStack(std::ostream& os, pid_t tid, const char* prefix,
- mirror::ArtMethod* current_method, void* ucontext_ptr) {
+ ArtMethod* current_method, void* ucontext_ptr) {
#if __linux__
// b/18119146
if (RUNNING_ON_VALGRIND != 0) {
diff --git a/runtime/utils.h b/runtime/utils.h
index e7532e1..1ef98e7 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -34,10 +34,10 @@
namespace art {
class ArtField;
+class ArtMethod;
class DexFile;
namespace mirror {
-class ArtMethod;
class Class;
class Object;
class String;
@@ -125,7 +125,7 @@ std::string PrettyField(uint32_t field_idx, const DexFile& dex_file, bool with_t
// Returns a human-readable signature for 'm'. Something like "a.b.C.m" or
// "a.b.C.m(II)V" (depending on the value of 'with_signature').
-std::string PrettyMethod(mirror::ArtMethod* m, bool with_signature = true)
+std::string PrettyMethod(ArtMethod* m, bool with_signature = true)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
std::string PrettyMethod(uint32_t method_idx, const DexFile& dex_file, bool with_signature = true);
@@ -181,10 +181,10 @@ bool IsValidDescriptor(const char* s); // "Ljava/lang/String;"
bool IsValidMemberName(const char* s);
// Returns the JNI native function name for the non-overloaded method 'm'.
-std::string JniShortName(mirror::ArtMethod* m)
+std::string JniShortName(ArtMethod* m)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Returns the JNI native function name for the overloaded method 'm'.
-std::string JniLongName(mirror::ArtMethod* m)
+std::string JniLongName(ArtMethod* m)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool ReadFileToString(const std::string& file_name, std::string* result);
@@ -221,7 +221,7 @@ void SetThreadName(const char* thread_name);
// Dumps the native stack for thread 'tid' to 'os'.
void DumpNativeStack(std::ostream& os, pid_t tid, const char* prefix = "",
- mirror::ArtMethod* current_method = nullptr, void* ucontext = nullptr)
+ ArtMethod* current_method = nullptr, void* ucontext = nullptr)
NO_THREAD_SAFETY_ANALYSIS;
// Dumps the kernel stack for thread 'tid' to 'os'. Note that this is only available on linux-x86.
@@ -320,6 +320,9 @@ inline bool TestBitmap(size_t idx, const uint8_t* bitmap) {
return ((bitmap[idx / kBitsPerByte] >> (idx % kBitsPerByte)) & 0x01) != 0;
}
+static inline constexpr bool ValidPointerSize(size_t pointer_size) {
+ return pointer_size == 4 || pointer_size == 8;
+}
} // namespace art
diff --git a/runtime/utils_test.cc b/runtime/utils_test.cc
index 8a7f805..66e38b1 100644
--- a/runtime/utils_test.cc
+++ b/runtime/utils_test.cc
@@ -185,19 +185,19 @@ TEST_F(UtilsTest, JniShortName_JniLongName) {
ScopedObjectAccess soa(Thread::Current());
mirror::Class* c = class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/String;");
ASSERT_TRUE(c != nullptr);
- mirror::ArtMethod* m;
+ ArtMethod* m;
- m = c->FindVirtualMethod("charAt", "(I)C");
+ m = c->FindVirtualMethod("charAt", "(I)C", sizeof(void*));
ASSERT_TRUE(m != nullptr);
EXPECT_EQ("Java_java_lang_String_charAt", JniShortName(m));
EXPECT_EQ("Java_java_lang_String_charAt__I", JniLongName(m));
- m = c->FindVirtualMethod("indexOf", "(Ljava/lang/String;I)I");
+ m = c->FindVirtualMethod("indexOf", "(Ljava/lang/String;I)I", sizeof(void*));
ASSERT_TRUE(m != nullptr);
EXPECT_EQ("Java_java_lang_String_indexOf", JniShortName(m));
EXPECT_EQ("Java_java_lang_String_indexOf__Ljava_lang_String_2I", JniLongName(m));
- m = c->FindDirectMethod("copyValueOf", "([CII)Ljava/lang/String;");
+ m = c->FindDirectMethod("copyValueOf", "([CII)Ljava/lang/String;", sizeof(void*));
ASSERT_TRUE(m != nullptr);
EXPECT_EQ("Java_java_lang_String_copyValueOf", JniShortName(m));
EXPECT_EQ("Java_java_lang_String_copyValueOf___3CII", JniLongName(m));
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index e6801de..aa54b17 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -19,6 +19,7 @@
#include <iostream>
#include "art_field-inl.h"
+#include "art_method-inl.h"
#include "base/logging.h"
#include "base/mutex-inl.h"
#include "base/time_utils.h"
@@ -32,7 +33,6 @@
#include "indenter.h"
#include "intern_table.h"
#include "leb128.h"
-#include "mirror/art_method-inl.h"
#include "mirror/class.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache-inl.h"
@@ -115,15 +115,13 @@ static void SafelyMarkAllRegistersAsConflicts(MethodVerifier* verifier, Register
}
MethodVerifier::FailureKind MethodVerifier::VerifyMethod(
- mirror::ArtMethod* method, bool allow_soft_failures, std::string* error ATTRIBUTE_UNUSED) {
- Thread* self = Thread::Current();
- StackHandleScope<3> hs(self);
+ ArtMethod* method, bool allow_soft_failures, std::string* error ATTRIBUTE_UNUSED) {
+ StackHandleScope<2> hs(Thread::Current());
mirror::Class* klass = method->GetDeclaringClass();
auto h_dex_cache(hs.NewHandle(klass->GetDexCache()));
auto h_class_loader(hs.NewHandle(klass->GetClassLoader()));
- auto h_method = hs.NewHandle(method);
- return VerifyMethod(self, method->GetDexMethodIndex(), method->GetDexFile(), h_dex_cache,
- h_class_loader, klass->GetClassDef(), method->GetCodeItem(), h_method,
+ return VerifyMethod(hs.Self(), method->GetDexMethodIndex(), method->GetDexFile(), h_dex_cache,
+ h_class_loader, klass->GetClassDef(), method->GetCodeItem(), method,
method->GetAccessFlags(), allow_soft_failures, false);
}
@@ -162,7 +160,8 @@ MethodVerifier::FailureKind MethodVerifier::VerifyClass(Thread* self,
StackHandleScope<2> hs(self);
Handle<mirror::DexCache> dex_cache(hs.NewHandle(klass->GetDexCache()));
Handle<mirror::ClassLoader> class_loader(hs.NewHandle(klass->GetClassLoader()));
- return VerifyClass(self, &dex_file, dex_cache, class_loader, class_def, allow_soft_failures, error);
+ return VerifyClass(
+ self, &dex_file, dex_cache, class_loader, class_def, allow_soft_failures, error);
}
MethodVerifier::FailureKind MethodVerifier::VerifyClass(Thread* self,
@@ -197,16 +196,16 @@ MethodVerifier::FailureKind MethodVerifier::VerifyClass(Thread* self,
}
previous_direct_method_idx = method_idx;
InvokeType type = it.GetMethodInvokeType(*class_def);
- mirror::ArtMethod* method =
- linker->ResolveMethod(*dex_file, method_idx, dex_cache, class_loader,
- NullHandle<mirror::ArtMethod>(), type);
+ ArtMethod* method = linker->ResolveMethod(
+ *dex_file, method_idx, dex_cache, class_loader, nullptr, type);
if (method == nullptr) {
DCHECK(self->IsExceptionPending());
// We couldn't resolve the method, but continue regardless.
self->ClearException();
+ } else {
+ DCHECK(method->GetDeclaringClassUnchecked() != nullptr) << type;
}
StackHandleScope<1> hs(self);
- Handle<mirror::ArtMethod> h_method(hs.NewHandle(method));
MethodVerifier::FailureKind result = VerifyMethod(self,
method_idx,
dex_file,
@@ -214,10 +213,7 @@ MethodVerifier::FailureKind MethodVerifier::VerifyClass(Thread* self,
class_loader,
class_def,
it.GetMethodCodeItem(),
- h_method,
- it.GetMethodAccessFlags(),
- allow_soft_failures,
- false);
+ method, it.GetMethodAccessFlags(), allow_soft_failures, false);
if (result != kNoFailure) {
if (result == kHardFailure) {
hard_fail = true;
@@ -245,16 +241,14 @@ MethodVerifier::FailureKind MethodVerifier::VerifyClass(Thread* self,
}
previous_virtual_method_idx = method_idx;
InvokeType type = it.GetMethodInvokeType(*class_def);
- mirror::ArtMethod* method =
- linker->ResolveMethod(*dex_file, method_idx, dex_cache, class_loader,
- NullHandle<mirror::ArtMethod>(), type);
+ ArtMethod* method = linker->ResolveMethod(
+ *dex_file, method_idx, dex_cache, class_loader, nullptr, type);
if (method == nullptr) {
DCHECK(self->IsExceptionPending());
// We couldn't resolve the method, but continue regardless.
self->ClearException();
}
StackHandleScope<1> hs(self);
- Handle<mirror::ArtMethod> h_method(hs.NewHandle(method));
MethodVerifier::FailureKind result = VerifyMethod(self,
method_idx,
dex_file,
@@ -262,10 +256,7 @@ MethodVerifier::FailureKind MethodVerifier::VerifyClass(Thread* self,
class_loader,
class_def,
it.GetMethodCodeItem(),
- h_method,
- it.GetMethodAccessFlags(),
- allow_soft_failures,
- false);
+ method, it.GetMethodAccessFlags(), allow_soft_failures, false);
if (result != kNoFailure) {
if (result == kHardFailure) {
hard_fail = true;
@@ -305,7 +296,7 @@ MethodVerifier::FailureKind MethodVerifier::VerifyMethod(Thread* self, uint32_t
Handle<mirror::ClassLoader> class_loader,
const DexFile::ClassDef* class_def,
const DexFile::CodeItem* code_item,
- Handle<mirror::ArtMethod> method,
+ ArtMethod* method,
uint32_t method_access_flags,
bool allow_soft_failures,
bool need_precise_constants) {
@@ -355,7 +346,7 @@ MethodVerifier* MethodVerifier::VerifyMethodAndDump(Thread* self, std::ostream&
Handle<mirror::ClassLoader> class_loader,
const DexFile::ClassDef* class_def,
const DexFile::CodeItem* code_item,
- Handle<mirror::ArtMethod> method,
+ ArtMethod* method,
uint32_t method_access_flags) {
MethodVerifier* verifier = new MethodVerifier(self, dex_file, dex_cache, class_loader,
class_def, code_item, dex_method_idx, method,
@@ -379,7 +370,7 @@ MethodVerifier::MethodVerifier(Thread* self,
Handle<mirror::ClassLoader> class_loader,
const DexFile::ClassDef* class_def,
const DexFile::CodeItem* code_item, uint32_t dex_method_idx,
- Handle<mirror::ArtMethod> method, uint32_t method_access_flags,
+ ArtMethod* method, uint32_t method_access_flags,
bool can_load_classes, bool allow_soft_failures,
bool need_precise_constants, bool verify_to_dump,
bool allow_thread_suspension)
@@ -418,15 +409,13 @@ MethodVerifier::~MethodVerifier() {
STLDeleteElements(&failure_messages_);
}
-void MethodVerifier::FindLocksAtDexPc(mirror::ArtMethod* m, uint32_t dex_pc,
+void MethodVerifier::FindLocksAtDexPc(ArtMethod* m, uint32_t dex_pc,
std::vector<uint32_t>* monitor_enter_dex_pcs) {
- Thread* self = Thread::Current();
- StackHandleScope<3> hs(self);
+ StackHandleScope<2> hs(Thread::Current());
Handle<mirror::DexCache> dex_cache(hs.NewHandle(m->GetDexCache()));
Handle<mirror::ClassLoader> class_loader(hs.NewHandle(m->GetClassLoader()));
- Handle<mirror::ArtMethod> method(hs.NewHandle(m));
- MethodVerifier verifier(self, m->GetDexFile(), dex_cache, class_loader, &m->GetClassDef(),
- m->GetCodeItem(), m->GetDexMethodIndex(), method, m->GetAccessFlags(),
+ MethodVerifier verifier(hs.Self(), m->GetDexFile(), dex_cache, class_loader, &m->GetClassDef(),
+ m->GetCodeItem(), m->GetDexMethodIndex(), m, m->GetAccessFlags(),
false, true, false, false);
verifier.interesting_dex_pc_ = dex_pc;
verifier.monitor_enter_dex_pcs_ = monitor_enter_dex_pcs;
@@ -465,16 +454,13 @@ void MethodVerifier::FindLocksAtDexPc() {
Verify();
}
-ArtField* MethodVerifier::FindAccessedFieldAtDexPc(mirror::ArtMethod* m,
- uint32_t dex_pc) {
- Thread* self = Thread::Current();
- StackHandleScope<3> hs(self);
+ArtField* MethodVerifier::FindAccessedFieldAtDexPc(ArtMethod* m, uint32_t dex_pc) {
+ StackHandleScope<2> hs(Thread::Current());
Handle<mirror::DexCache> dex_cache(hs.NewHandle(m->GetDexCache()));
Handle<mirror::ClassLoader> class_loader(hs.NewHandle(m->GetClassLoader()));
- Handle<mirror::ArtMethod> method(hs.NewHandle(m));
- MethodVerifier verifier(self, m->GetDexFile(), dex_cache, class_loader, &m->GetClassDef(),
- m->GetCodeItem(), m->GetDexMethodIndex(), method, m->GetAccessFlags(),
- true, true, false, true);
+ MethodVerifier verifier(hs.Self(), m->GetDexFile(), dex_cache, class_loader, &m->GetClassDef(),
+ m->GetCodeItem(), m->GetDexMethodIndex(), m, m->GetAccessFlags(), true,
+ true, false, true);
return verifier.FindAccessedFieldAtDexPc(dex_pc);
}
@@ -497,20 +483,17 @@ ArtField* MethodVerifier::FindAccessedFieldAtDexPc(uint32_t dex_pc) {
return GetQuickFieldAccess(inst, register_line);
}
-mirror::ArtMethod* MethodVerifier::FindInvokedMethodAtDexPc(mirror::ArtMethod* m,
- uint32_t dex_pc) {
- Thread* self = Thread::Current();
- StackHandleScope<3> hs(self);
+ArtMethod* MethodVerifier::FindInvokedMethodAtDexPc(ArtMethod* m, uint32_t dex_pc) {
+ StackHandleScope<2> hs(Thread::Current());
Handle<mirror::DexCache> dex_cache(hs.NewHandle(m->GetDexCache()));
Handle<mirror::ClassLoader> class_loader(hs.NewHandle(m->GetClassLoader()));
- Handle<mirror::ArtMethod> method(hs.NewHandle(m));
- MethodVerifier verifier(self, m->GetDexFile(), dex_cache, class_loader, &m->GetClassDef(),
- m->GetCodeItem(), m->GetDexMethodIndex(), method, m->GetAccessFlags(),
- true, true, false, true);
+ MethodVerifier verifier(hs.Self(), m->GetDexFile(), dex_cache, class_loader, &m->GetClassDef(),
+ m->GetCodeItem(), m->GetDexMethodIndex(), m, m->GetAccessFlags(), true,
+ true, false, true);
return verifier.FindInvokedMethodAtDexPc(dex_pc);
}
-mirror::ArtMethod* MethodVerifier::FindInvokedMethodAtDexPc(uint32_t dex_pc) {
+ArtMethod* MethodVerifier::FindInvokedMethodAtDexPc(uint32_t dex_pc) {
CHECK(code_item_ != nullptr); // This only makes sense for methods with code.
// Strictly speaking, we ought to be able to get away with doing a subset of the full method
@@ -530,14 +513,13 @@ mirror::ArtMethod* MethodVerifier::FindInvokedMethodAtDexPc(uint32_t dex_pc) {
return GetQuickInvokedMethod(inst, register_line, is_range, false);
}
-SafeMap<uint32_t, std::set<uint32_t>> MethodVerifier::FindStringInitMap(mirror::ArtMethod* m) {
+SafeMap<uint32_t, std::set<uint32_t>> MethodVerifier::FindStringInitMap(ArtMethod* m) {
Thread* self = Thread::Current();
- StackHandleScope<3> hs(self);
+ StackHandleScope<2> hs(self);
Handle<mirror::DexCache> dex_cache(hs.NewHandle(m->GetDexCache()));
Handle<mirror::ClassLoader> class_loader(hs.NewHandle(m->GetClassLoader()));
- Handle<mirror::ArtMethod> method(hs.NewHandle(m));
MethodVerifier verifier(self, m->GetDexFile(), dex_cache, class_loader, &m->GetClassDef(),
- m->GetCodeItem(), m->GetDexMethodIndex(), method, m->GetAccessFlags(),
+ m->GetCodeItem(), m->GetDexMethodIndex(), m, m->GetAccessFlags(),
true, true, false, true);
return verifier.FindStringInitMap();
}
@@ -2374,15 +2356,13 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
inst->Opcode() == Instruction::INVOKE_SUPER_RANGE);
bool is_super = (inst->Opcode() == Instruction::INVOKE_SUPER ||
inst->Opcode() == Instruction::INVOKE_SUPER_RANGE);
- mirror::ArtMethod* called_method = VerifyInvocationArgs(inst, METHOD_VIRTUAL, is_range,
- is_super);
+ ArtMethod* called_method = VerifyInvocationArgs(inst, METHOD_VIRTUAL, is_range, is_super);
const RegType* return_type = nullptr;
if (called_method != nullptr) {
StackHandleScope<1> hs(self_);
- Handle<mirror::ArtMethod> h_called_method(hs.NewHandle(called_method));
- mirror::Class* return_type_class = h_called_method->GetReturnType(can_load_classes_);
+ mirror::Class* return_type_class = called_method->GetReturnType(can_load_classes_);
if (return_type_class != nullptr) {
- return_type = &reg_types_.FromClass(h_called_method->GetReturnTypeDescriptor(),
+ return_type = &reg_types_.FromClass(called_method->GetReturnTypeDescriptor(),
return_type_class,
return_type_class->CannotBeAssignedFromOtherTypes());
} else {
@@ -2408,10 +2388,10 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
case Instruction::INVOKE_DIRECT:
case Instruction::INVOKE_DIRECT_RANGE: {
bool is_range = (inst->Opcode() == Instruction::INVOKE_DIRECT_RANGE);
- mirror::ArtMethod* called_method = VerifyInvocationArgs(inst,
- METHOD_DIRECT,
- is_range,
- false);
+ ArtMethod* called_method = VerifyInvocationArgs(inst,
+ METHOD_DIRECT,
+ is_range,
+ false);
const char* return_type_descriptor;
bool is_constructor;
const RegType* return_type = nullptr;
@@ -2425,8 +2405,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
is_constructor = called_method->IsConstructor();
return_type_descriptor = called_method->GetReturnTypeDescriptor();
StackHandleScope<1> hs(self_);
- Handle<mirror::ArtMethod> h_called_method(hs.NewHandle(called_method));
- mirror::Class* return_type_class = h_called_method->GetReturnType(can_load_classes_);
+ mirror::Class* return_type_class = called_method->GetReturnType(can_load_classes_);
if (return_type_class != nullptr) {
return_type = &reg_types_.FromClass(return_type_descriptor,
return_type_class,
@@ -2492,10 +2471,10 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
case Instruction::INVOKE_STATIC:
case Instruction::INVOKE_STATIC_RANGE: {
bool is_range = (inst->Opcode() == Instruction::INVOKE_STATIC_RANGE);
- mirror::ArtMethod* called_method = VerifyInvocationArgs(inst,
- METHOD_STATIC,
- is_range,
- false);
+ ArtMethod* called_method = VerifyInvocationArgs(inst,
+ METHOD_STATIC,
+ is_range,
+ false);
const char* descriptor;
if (called_method == nullptr) {
uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
@@ -2517,10 +2496,10 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
case Instruction::INVOKE_INTERFACE:
case Instruction::INVOKE_INTERFACE_RANGE: {
bool is_range = (inst->Opcode() == Instruction::INVOKE_INTERFACE_RANGE);
- mirror::ArtMethod* abs_method = VerifyInvocationArgs(inst,
- METHOD_INTERFACE,
- is_range,
- false);
+ ArtMethod* abs_method = VerifyInvocationArgs(inst,
+ METHOD_INTERFACE,
+ is_range,
+ false);
if (abs_method != nullptr) {
mirror::Class* called_interface = abs_method->GetDeclaringClass();
if (!called_interface->IsInterface() && !called_interface->IsObjectClass()) {
@@ -2845,7 +2824,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
case Instruction::INVOKE_VIRTUAL_QUICK:
case Instruction::INVOKE_VIRTUAL_RANGE_QUICK: {
bool is_range = (inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE_QUICK);
- mirror::ArtMethod* called_method = VerifyInvokeVirtualQuickArgs(inst, is_range);
+ ArtMethod* called_method = VerifyInvokeVirtualQuickArgs(inst, is_range);
if (called_method != nullptr) {
const char* descriptor = called_method->GetReturnTypeDescriptor();
const RegType& return_type = reg_types_.FromDescriptor(GetClassLoader(), descriptor, false);
@@ -3205,8 +3184,8 @@ const RegType& MethodVerifier::GetCaughtExceptionType() {
return *common_super;
}
-mirror::ArtMethod* MethodVerifier::ResolveMethodAndCheckAccess(uint32_t dex_method_idx,
- MethodType method_type) {
+ArtMethod* MethodVerifier::ResolveMethodAndCheckAccess(
+ uint32_t dex_method_idx, MethodType method_type) {
const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx);
const RegType& klass_type = ResolveClassAndCheckAccess(method_id.class_idx_);
if (klass_type.IsConflict()) {
@@ -3220,26 +3199,28 @@ mirror::ArtMethod* MethodVerifier::ResolveMethodAndCheckAccess(uint32_t dex_meth
}
mirror::Class* klass = klass_type.GetClass();
const RegType& referrer = GetDeclaringClass();
- mirror::ArtMethod* res_method = dex_cache_->GetResolvedMethod(dex_method_idx);
+ auto* cl = Runtime::Current()->GetClassLinker();
+ auto pointer_size = cl->GetImagePointerSize();
+ ArtMethod* res_method = dex_cache_->GetResolvedMethod(dex_method_idx, pointer_size);
if (res_method == nullptr) {
const char* name = dex_file_->GetMethodName(method_id);
const Signature signature = dex_file_->GetMethodSignature(method_id);
if (method_type == METHOD_DIRECT || method_type == METHOD_STATIC) {
- res_method = klass->FindDirectMethod(name, signature);
+ res_method = klass->FindDirectMethod(name, signature, pointer_size);
} else if (method_type == METHOD_INTERFACE) {
- res_method = klass->FindInterfaceMethod(name, signature);
+ res_method = klass->FindInterfaceMethod(name, signature, pointer_size);
} else {
- res_method = klass->FindVirtualMethod(name, signature);
+ res_method = klass->FindVirtualMethod(name, signature, pointer_size);
}
if (res_method != nullptr) {
- dex_cache_->SetResolvedMethod(dex_method_idx, res_method);
+ dex_cache_->SetResolvedMethod(dex_method_idx, res_method, pointer_size);
} else {
// If a virtual or interface method wasn't found with the expected type, look in
// the direct methods. This can happen when the wrong invoke type is used or when
// a class has changed, and will be flagged as an error in later checks.
if (method_type == METHOD_INTERFACE || method_type == METHOD_VIRTUAL) {
- res_method = klass->FindDirectMethod(name, signature);
+ res_method = klass->FindDirectMethod(name, signature, pointer_size);
}
if (res_method == nullptr) {
Fail(VERIFY_ERROR_NO_METHOD) << "couldn't find method "
@@ -3298,10 +3279,8 @@ mirror::ArtMethod* MethodVerifier::ResolveMethodAndCheckAccess(uint32_t dex_meth
}
template <class T>
-mirror::ArtMethod* MethodVerifier::VerifyInvocationArgsFromIterator(T* it, const Instruction* inst,
- MethodType method_type,
- bool is_range,
- mirror::ArtMethod* res_method) {
+ArtMethod* MethodVerifier::VerifyInvocationArgsFromIterator(
+ T* it, const Instruction* inst, MethodType method_type, bool is_range, ArtMethod* res_method) {
// We use vAA as our expected arg count, rather than res_method->insSize, because we need to
// match the call to the signature. Also, we might be calling through an abstract method
// definition (which doesn't have register count values).
@@ -3431,7 +3410,7 @@ void MethodVerifier::VerifyInvocationArgsUnresolvedMethod(const Instruction* ins
class MethodParamListDescriptorIterator {
public:
- explicit MethodParamListDescriptorIterator(mirror::ArtMethod* res_method) :
+ explicit MethodParamListDescriptorIterator(ArtMethod* res_method) :
res_method_(res_method), pos_(0), params_(res_method->GetParameterTypeList()),
params_size_(params_ == nullptr ? 0 : params_->Size()) {
}
@@ -3449,21 +3428,19 @@ class MethodParamListDescriptorIterator {
}
private:
- mirror::ArtMethod* res_method_;
+ ArtMethod* res_method_;
size_t pos_;
const DexFile::TypeList* params_;
const size_t params_size_;
};
-mirror::ArtMethod* MethodVerifier::VerifyInvocationArgs(const Instruction* inst,
- MethodType method_type,
- bool is_range,
- bool is_super) {
+ArtMethod* MethodVerifier::VerifyInvocationArgs(
+ const Instruction* inst, MethodType method_type, bool is_range, bool is_super) {
// Resolve the method. This could be an abstract or concrete method depending on what sort of call
// we're making.
const uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
- mirror::ArtMethod* res_method = ResolveMethodAndCheckAccess(method_idx, method_type);
+ ArtMethod* res_method = ResolveMethodAndCheckAccess(method_idx, method_type);
if (res_method == nullptr) { // error or class is unresolved
// Check what we can statically.
if (!have_pending_hard_failure_) {
@@ -3500,9 +3477,8 @@ mirror::ArtMethod* MethodVerifier::VerifyInvocationArgs(const Instruction* inst,
is_range, res_method);
}
-mirror::ArtMethod* MethodVerifier::GetQuickInvokedMethod(const Instruction* inst,
- RegisterLine* reg_line, bool is_range,
- bool allow_failure) {
+ArtMethod* MethodVerifier::GetQuickInvokedMethod(const Instruction* inst, RegisterLine* reg_line,
+ bool is_range, bool allow_failure) {
if (is_range) {
DCHECK_EQ(inst->Opcode(), Instruction::INVOKE_VIRTUAL_RANGE_QUICK);
} else {
@@ -3532,13 +3508,15 @@ mirror::ArtMethod* MethodVerifier::GetQuickInvokedMethod(const Instruction* inst
return nullptr;
}
uint16_t vtable_index = is_range ? inst->VRegB_3rc() : inst->VRegB_35c();
+ auto* cl = Runtime::Current()->GetClassLinker();
+ auto pointer_size = cl->GetImagePointerSize();
if (static_cast<int32_t>(vtable_index) >= dispatch_class->GetVTableLength()) {
FailOrAbort(this, allow_failure,
"Receiver class has not enough vtable slots for quickened invoke at ",
work_insn_idx_);
return nullptr;
}
- mirror::ArtMethod* res_method = dispatch_class->GetVTableEntry(vtable_index);
+ ArtMethod* res_method = dispatch_class->GetVTableEntry(vtable_index, pointer_size);
if (self_->IsExceptionPending()) {
FailOrAbort(this, allow_failure, "Unexpected exception pending for quickened invoke at ",
work_insn_idx_);
@@ -3547,12 +3525,11 @@ mirror::ArtMethod* MethodVerifier::GetQuickInvokedMethod(const Instruction* inst
return res_method;
}
-mirror::ArtMethod* MethodVerifier::VerifyInvokeVirtualQuickArgs(const Instruction* inst,
- bool is_range) {
+ArtMethod* MethodVerifier::VerifyInvokeVirtualQuickArgs(const Instruction* inst, bool is_range) {
DCHECK(Runtime::Current()->IsStarted() || verify_to_dump_)
<< PrettyMethod(dex_method_idx_, *dex_file_, true) << "@" << work_insn_idx_;
- mirror::ArtMethod* res_method = GetQuickInvokedMethod(inst, work_line_.get(), is_range, false);
+ ArtMethod* res_method = GetQuickInvokedMethod(inst, work_line_.get(), is_range, false);
if (res_method == nullptr) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Cannot infer method from " << inst->Name();
return nullptr;
@@ -4258,7 +4235,7 @@ InstructionFlags* MethodVerifier::CurrentInsnFlags() {
const RegType& MethodVerifier::GetMethodReturnType() {
if (return_type_ == nullptr) {
- if (mirror_method_.Get() != nullptr) {
+ if (mirror_method_ != nullptr) {
mirror::Class* return_type_class = mirror_method_->GetReturnType(can_load_classes_);
if (return_type_class != nullptr) {
return_type_ = &reg_types_.FromClass(mirror_method_->GetReturnTypeDescriptor(),
@@ -4285,7 +4262,7 @@ const RegType& MethodVerifier::GetDeclaringClass() {
const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
const char* descriptor
= dex_file_->GetTypeDescriptor(dex_file_->GetTypeId(method_id.class_idx_));
- if (mirror_method_.Get() != nullptr) {
+ if (mirror_method_ != nullptr) {
mirror::Class* klass = mirror_method_->GetDeclaringClass();
declaring_class_ = &reg_types_.FromClass(descriptor, klass,
klass->CannotBeAssignedFromOtherTypes());
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index 452d1dd..873b8ab 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -152,12 +152,11 @@ class MethodVerifier {
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader,
const DexFile::ClassDef* class_def,
- const DexFile::CodeItem* code_item,
- Handle<mirror::ArtMethod> method,
+ const DexFile::CodeItem* code_item, ArtMethod* method,
uint32_t method_access_flags)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static FailureKind VerifyMethod(mirror::ArtMethod* method, bool allow_soft_failures,
+ static FailureKind VerifyMethod(ArtMethod* method, bool allow_soft_failures,
std::string* error) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
uint8_t EncodePcToReferenceMapData() const;
@@ -185,21 +184,21 @@ class MethodVerifier {
// Fills 'monitor_enter_dex_pcs' with the dex pcs of the monitor-enter instructions corresponding
// to the locks held at 'dex_pc' in method 'm'.
- static void FindLocksAtDexPc(mirror::ArtMethod* m, uint32_t dex_pc,
+ static void FindLocksAtDexPc(ArtMethod* m, uint32_t dex_pc,
std::vector<uint32_t>* monitor_enter_dex_pcs)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Returns the accessed field corresponding to the quick instruction's field
// offset at 'dex_pc' in method 'm'.
- static ArtField* FindAccessedFieldAtDexPc(mirror::ArtMethod* m, uint32_t dex_pc)
+ static ArtField* FindAccessedFieldAtDexPc(ArtMethod* m, uint32_t dex_pc)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Returns the invoked method corresponding to the quick instruction's vtable
// index at 'dex_pc' in method 'm'.
- static mirror::ArtMethod* FindInvokedMethodAtDexPc(mirror::ArtMethod* m, uint32_t dex_pc)
+ static ArtMethod* FindInvokedMethodAtDexPc(ArtMethod* m, uint32_t dex_pc)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static SafeMap<uint32_t, std::set<uint32_t>> FindStringInitMap(mirror::ArtMethod* m)
+ static SafeMap<uint32_t, std::set<uint32_t>> FindStringInitMap(ArtMethod* m)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static void Init() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -212,7 +211,7 @@ class MethodVerifier {
MethodVerifier(Thread* self, const DexFile* dex_file, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexFile::ClassDef* class_def,
const DexFile::CodeItem* code_item, uint32_t method_idx,
- Handle<mirror::ArtMethod> method,
+ ArtMethod* method,
uint32_t access_flags, bool can_load_classes, bool allow_soft_failures,
bool need_precise_constants, bool allow_thread_suspension)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
@@ -248,7 +247,7 @@ class MethodVerifier {
const RegType& ResolveCheckedClass(uint32_t class_idx)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Returns the method of a quick invoke or null if it cannot be found.
- mirror::ArtMethod* GetQuickInvokedMethod(const Instruction* inst, RegisterLine* reg_line,
+ ArtMethod* GetQuickInvokedMethod(const Instruction* inst, RegisterLine* reg_line,
bool is_range, bool allow_failure)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Returns the access field of a quick field access (iget/iput-quick) or null
@@ -275,7 +274,7 @@ class MethodVerifier {
MethodVerifier(Thread* self, const DexFile* dex_file, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexFile::ClassDef* class_def,
const DexFile::CodeItem* code_item, uint32_t method_idx,
- Handle<mirror::ArtMethod> method, uint32_t access_flags,
+ ArtMethod* method, uint32_t access_flags,
bool can_load_classes, bool allow_soft_failures, bool need_precise_constants,
bool verify_to_dump, bool allow_thread_suspension)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -302,7 +301,7 @@ class MethodVerifier {
Handle<mirror::ClassLoader> class_loader,
const DexFile::ClassDef* class_def_idx,
const DexFile::CodeItem* code_item,
- Handle<mirror::ArtMethod> method, uint32_t method_access_flags,
+ ArtMethod* method, uint32_t method_access_flags,
bool allow_soft_failures, bool need_precise_constants)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -311,7 +310,7 @@ class MethodVerifier {
ArtField* FindAccessedFieldAtDexPc(uint32_t dex_pc)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::ArtMethod* FindInvokedMethodAtDexPc(uint32_t dex_pc)
+ ArtMethod* FindInvokedMethodAtDexPc(uint32_t dex_pc)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
SafeMap<uint32_t, std::set<uint32_t>>& FindStringInitMap()
@@ -573,7 +572,7 @@ class MethodVerifier {
* the referrer can access the resolved method.
* Does not throw exceptions.
*/
- mirror::ArtMethod* ResolveMethodAndCheckAccess(uint32_t method_idx, MethodType method_type)
+ ArtMethod* ResolveMethodAndCheckAccess(uint32_t method_idx, MethodType method_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
/*
@@ -598,7 +597,7 @@ class MethodVerifier {
* Returns the resolved method on success, null on failure (with *failure
* set appropriately).
*/
- mirror::ArtMethod* VerifyInvocationArgs(const Instruction* inst,
+ ArtMethod* VerifyInvocationArgs(const Instruction* inst,
MethodType method_type,
bool is_range, bool is_super)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -610,12 +609,12 @@ class MethodVerifier {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template <class T>
- mirror::ArtMethod* VerifyInvocationArgsFromIterator(T* it, const Instruction* inst,
+ ArtMethod* VerifyInvocationArgsFromIterator(T* it, const Instruction* inst,
MethodType method_type, bool is_range,
- mirror::ArtMethod* res_method)
+ ArtMethod* res_method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::ArtMethod* VerifyInvokeVirtualQuickArgs(const Instruction* inst, bool is_range)
+ ArtMethod* VerifyInvokeVirtualQuickArgs(const Instruction* inst, bool is_range)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
/*
@@ -680,7 +679,7 @@ class MethodVerifier {
const uint32_t dex_method_idx_; // The method we're working on.
// Its object representation if known.
- Handle<mirror::ArtMethod> mirror_method_ GUARDED_BY(Locks::mutator_lock_);
+ ArtMethod* mirror_method_ GUARDED_BY(Locks::mutator_lock_);
const uint32_t method_access_flags_; // Method's access flags.
const RegType* return_type_; // Lazily computed return type of the method.
const DexFile* const dex_file_; // The dex file containing the method.
diff --git a/runtime/verify_object-inl.h b/runtime/verify_object-inl.h
index 39df375..f7a8249 100644
--- a/runtime/verify_object-inl.h
+++ b/runtime/verify_object-inl.h
@@ -20,7 +20,6 @@
#include "verify_object.h"
#include "gc/heap.h"
-#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
namespace art {
diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc
index 2843806..3dbfe1b 100644
--- a/runtime/well_known_classes.cc
+++ b/runtime/well_known_classes.cc
@@ -22,6 +22,7 @@
#include "base/logging.h"
#include "mirror/class.h"
+#include "mirror/throwable.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
@@ -41,7 +42,6 @@ jclass WellKnownClasses::java_lang_Error;
jclass WellKnownClasses::java_lang_Object;
jclass WellKnownClasses::java_lang_OutOfMemoryError;
jclass WellKnownClasses::java_lang_reflect_AbstractMethod;
-jclass WellKnownClasses::java_lang_reflect_ArtMethod;
jclass WellKnownClasses::java_lang_reflect_Constructor;
jclass WellKnownClasses::java_lang_reflect_Field;
jclass WellKnownClasses::java_lang_reflect_Method;
@@ -165,11 +165,13 @@ static jclass CacheClass(JNIEnv* env, const char* jni_class_name) {
static jfieldID CacheField(JNIEnv* env, jclass c, bool is_static,
const char* name, const char* signature) {
- jfieldID fid = (is_static ?
- env->GetStaticFieldID(c, name, signature) :
- env->GetFieldID(c, name, signature));
+ jfieldID fid = is_static ? env->GetStaticFieldID(c, name, signature) :
+ env->GetFieldID(c, name, signature);
if (fid == nullptr) {
ScopedObjectAccess soa(env);
+ if (soa.Self()->IsExceptionPending()) {
+ LOG(INTERNAL_FATAL) << soa.Self()->GetException()->Dump() << '\n';
+ }
std::ostringstream os;
WellKnownClasses::ToClass(c)->DumpClass(os, mirror::Class::kDumpClassFullDetail);
LOG(FATAL) << "Couldn't find field \"" << name << "\" with signature \"" << signature << "\": "
@@ -180,11 +182,13 @@ static jfieldID CacheField(JNIEnv* env, jclass c, bool is_static,
jmethodID CacheMethod(JNIEnv* env, jclass c, bool is_static,
const char* name, const char* signature) {
- jmethodID mid = (is_static ?
- env->GetStaticMethodID(c, name, signature) :
- env->GetMethodID(c, name, signature));
+ jmethodID mid = is_static ? env->GetStaticMethodID(c, name, signature) :
+ env->GetMethodID(c, name, signature);
if (mid == nullptr) {
ScopedObjectAccess soa(env);
+ if (soa.Self()->IsExceptionPending()) {
+ LOG(INTERNAL_FATAL) << soa.Self()->GetException()->Dump() << '\n';
+ }
std::ostringstream os;
WellKnownClasses::ToClass(c)->DumpClass(os, mirror::Class::kDumpClassFullDetail);
LOG(FATAL) << "Couldn't find method \"" << name << "\" with signature \"" << signature << "\": "
@@ -213,7 +217,6 @@ void WellKnownClasses::Init(JNIEnv* env) {
java_lang_OutOfMemoryError = CacheClass(env, "java/lang/OutOfMemoryError");
java_lang_Error = CacheClass(env, "java/lang/Error");
java_lang_reflect_AbstractMethod = CacheClass(env, "java/lang/reflect/AbstractMethod");
- java_lang_reflect_ArtMethod = CacheClass(env, "java/lang/reflect/ArtMethod");
java_lang_reflect_Constructor = CacheClass(env, "java/lang/reflect/Constructor");
java_lang_reflect_Field = CacheClass(env, "java/lang/reflect/Field");
java_lang_reflect_Method = CacheClass(env, "java/lang/reflect/Method");
@@ -334,7 +337,7 @@ void WellKnownClasses::Init(JNIEnv* env) {
java_lang_Throwable_stackTrace = CacheField(env, java_lang_Throwable, false, "stackTrace", "[Ljava/lang/StackTraceElement;");
java_lang_Throwable_stackState = CacheField(env, java_lang_Throwable, false, "stackState", "Ljava/lang/Object;");
java_lang_Throwable_suppressedExceptions = CacheField(env, java_lang_Throwable, false, "suppressedExceptions", "Ljava/util/List;");
- java_lang_reflect_AbstractMethod_artMethod = CacheField(env, java_lang_reflect_AbstractMethod, false, "artMethod", "Ljava/lang/reflect/ArtMethod;");
+ java_lang_reflect_AbstractMethod_artMethod = CacheField(env, java_lang_reflect_AbstractMethod, false, "artMethod", "J");
java_lang_reflect_Proxy_h = CacheField(env, java_lang_reflect_Proxy, false, "h", "Ljava/lang/reflect/InvocationHandler;");
java_nio_DirectByteBuffer_capacity = CacheField(env, java_nio_DirectByteBuffer, false, "capacity", "I");
java_nio_DirectByteBuffer_effectiveDirectAddress = CacheField(env, java_nio_DirectByteBuffer, false, "effectiveDirectAddress", "J");
diff --git a/runtime/well_known_classes.h b/runtime/well_known_classes.h
index acb2656..d25d1c3 100644
--- a/runtime/well_known_classes.h
+++ b/runtime/well_known_classes.h
@@ -53,7 +53,6 @@ struct WellKnownClasses {
static jclass java_lang_Object;
static jclass java_lang_OutOfMemoryError;
static jclass java_lang_reflect_AbstractMethod;
- static jclass java_lang_reflect_ArtMethod;
static jclass java_lang_reflect_Constructor;
static jclass java_lang_reflect_Field;
static jclass java_lang_reflect_Method;