author     Mathieu Chartier <mathieuc@google.com>  2015-04-22 13:56:20 -0700
committer  Mathieu Chartier <mathieuc@google.com>  2015-06-02 09:21:27 -0700
commit     3d21bdf8894e780d349c481e5c9e29fe1556051c (patch)
tree       61a5231f36c0dabd73457fec81df103462a05aff /compiler
parent     71f0a8a123fa27bdc857a98afebbaf0ed09dac15 (diff)
Move mirror::ArtMethod to native
Optimizing + quick tests are passing, devices boot.

TODO: Test and fix bugs in mips64.

Saves 16 bytes per most ArtMethod, 7.5MB reduction in system PSS.
Some of the savings are from removal of virtual methods and direct
methods object arrays.

Bug: 19264997

(cherry picked from commit e401d146407d61eeb99f8d6176b2ac13c4df1e33)

Change-Id: I622469a0cfa0e7082a2119f3d6a9491eb61e3f3d

Fix some ArtMethod related bugs

Added root visiting for runtime methods, not currently required
since the GcRoots in these methods are null.

Added missing GetInterfaceMethodIfProxy in GetMethodLine, fixes
--trace run-tests 005, 044.

Fixed optimizing compiler bug where we used a normal stack location
instead of double on ARM64, this fixes the debuggable tests.

TODO: Fix JDWP tests.

Bug: 19264997
Change-Id: I7c55f69c61d1b45351fd0dc7185ffe5efad82bd3

ART: Fix casts for 64-bit pointers on 32-bit compiler.

Bug: 19264997
Change-Id: Ief45cdd4bae5a43fc8bfdfa7cf744e2c57529457

Fix JDWP tests after ArtMethod change

Fixes Throwable::GetStackDepth for exception event detection after
internal stack trace representation change.

Adds missing ArtMethod::GetInterfaceMethodIfProxy call in case of
proxy method.

Bug: 19264997
Change-Id: I363e293796848c3ec491c963813f62d868da44d2

Fix accidental IMT and root marking regression

Was always using the conflict trampoline. Also included fix for
regression in GC time caused by extra roots. Most of the regression
was IMT.

Fixed bug in DumpGcPerformanceInfo where we would get SIGABRT due to
detached thread.

EvaluateAndApplyChanges:
From ~2500 -> ~1980
GC time: 8.2s -> 7.2s due to 1s less of MarkConcurrentRoots

Bug: 19264997
Change-Id: I4333e80a8268c2ed1284f87f25b9f113d4f2c7e0

Fix bogus image test assert

Previously we were comparing the size of the non moving space to
size of the image file. Now we properly compare the size of the
image space against the size of the image file.

Bug: 19264997
Change-Id: I7359f1f73ae3df60c5147245935a24431c04808a

[MIPS64] Fix art_quick_invoke_stub argument offsets.

ArtMethod reference's size got bigger, so we need to move other args
and leave enough space for ArtMethod* and 'this' pointer.

This fixes mips64 boot.

Bug: 19264997
Change-Id: I47198d5f39a4caab30b3b77479d5eedaad5006ab
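The heart of the change, visible throughout the diff below: an ArtMethod is no longer a managed mirror:: object addressed through compressed 4-byte StackReference<> slots, but a plain native struct addressed through pointer-sized ArtMethod* slots. Hence the frame-layout comment updates and the LoadRefDisp/StoreRefDisp to LoadWordDisp/StoreBaseDisp conversions. A minimal self-contained sketch of the two slot shapes (illustrative stand-ins, not ART's real definitions):

#include <cstdint>
#include <iostream>

struct ArtMethod {};  // after the change: a plain native struct

// Before the change: the method was a managed heap object, referenced
// through a compressed 32-bit handle regardless of the target word size.
template <typename T>
struct StackReference {
  uint32_t compressed_reference_;
};

int main() {
  std::cout << "method slot, before: " << sizeof(StackReference<ArtMethod>)
            << " bytes\n";  // 4 on every target
  std::cout << "method slot, after:  " << sizeof(ArtMethod*)
            << " bytes\n";  // 4 on 32-bit targets, 8 on 64-bit targets
  return 0;
}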
Diffstat (limited to 'compiler')
-rw-r--r--compiler/common_compiler_test.cc33
-rw-r--r--compiler/common_compiler_test.h4
-rw-r--r--compiler/compiler.h7
-rw-r--r--compiler/dex/dex_to_dex_compiler.cc2
-rw-r--r--compiler/dex/mir_method_info.cc2
-rw-r--r--compiler/dex/mir_optimization.cc3
-rw-r--r--compiler/dex/quick/arm/call_arm.cc10
-rw-r--r--compiler/dex/quick/arm/codegen_arm.h3
-rw-r--r--compiler/dex/quick/arm/int_arm.cc4
-rw-r--r--compiler/dex/quick/arm64/arm64_lir.h4
-rw-r--r--compiler/dex/quick/arm64/call_arm64.cc25
-rw-r--r--compiler/dex/quick/arm64/codegen_arm64.h3
-rw-r--r--compiler/dex/quick/arm64/int_arm64.cc9
-rw-r--r--compiler/dex/quick/arm64/target_arm64.cc9
-rw-r--r--compiler/dex/quick/codegen_util.cc7
-rw-r--r--compiler/dex/quick/gen_common.cc39
-rwxr-xr-xcompiler/dex/quick/gen_invoke.cc33
-rw-r--r--compiler/dex/quick/gen_loadstore.cc20
-rw-r--r--compiler/dex/quick/mips/call_mips.cc23
-rw-r--r--compiler/dex/quick/mir_to_lir.cc4
-rw-r--r--compiler/dex/quick/mir_to_lir.h14
-rw-r--r--compiler/dex/quick/quick_cfi_test.cc3
-rw-r--r--compiler/dex/quick/quick_cfi_test_expected.inc50
-rw-r--r--compiler/dex/quick/quick_compiler.cc4
-rw-r--r--compiler/dex/quick/quick_compiler.h2
-rw-r--r--compiler/dex/quick/x86/call_x86.cc21
-rw-r--r--compiler/dex/quick/x86/codegen_x86.h3
-rwxr-xr-xcompiler/dex/quick/x86/int_x86.cc20
-rwxr-xr-xcompiler/dex/quick/x86/target_x86.cc7
-rw-r--r--compiler/dex/quick/x86/x86_lir.h4
-rw-r--r--compiler/dex/type_inference.cc4
-rw-r--r--compiler/dex/type_inference.h4
-rw-r--r--compiler/dex/verified_method.cc18
-rw-r--r--compiler/driver/compiler_driver-inl.h52
-rw-r--r--compiler/driver/compiler_driver.cc113
-rw-r--r--compiler/driver/compiler_driver.h18
-rw-r--r--compiler/driver/compiler_driver_test.cc29
-rw-r--r--compiler/elf_writer.cc2
-rw-r--r--compiler/image_test.cc20
-rw-r--r--compiler/image_writer.cc572
-rw-r--r--compiler/image_writer.h105
-rw-r--r--compiler/jit/jit_compiler.cc30
-rw-r--r--compiler/jit/jit_compiler.h11
-rw-r--r--compiler/jni/jni_cfi_test_expected.inc131
-rw-r--r--compiler/jni/jni_compiler_test.cc11
-rw-r--r--compiler/jni/quick/arm/calling_convention_arm.cc3
-rw-r--r--compiler/jni/quick/arm64/calling_convention_arm64.cc6
-rw-r--r--compiler/jni/quick/calling_convention.cc4
-rw-r--r--compiler/jni/quick/calling_convention.h11
-rw-r--r--compiler/jni/quick/jni_compiler.cc37
-rw-r--r--compiler/jni/quick/mips/calling_convention_mips.cc2
-rw-r--r--compiler/jni/quick/mips64/calling_convention_mips64.cc8
-rw-r--r--compiler/jni/quick/x86/calling_convention_x86.cc2
-rw-r--r--compiler/jni/quick/x86_64/calling_convention_x86_64.cc8
-rw-r--r--compiler/linker/arm/relative_patcher_thumb2.cc4
-rw-r--r--compiler/linker/arm64/relative_patcher_arm64.cc13
-rw-r--r--compiler/oat_test.cc25
-rw-r--r--compiler/oat_writer.cc37
-rw-r--r--compiler/optimizing/builder.cc5
-rw-r--r--compiler/optimizing/code_generator.cc11
-rw-r--r--compiler/optimizing/code_generator.h15
-rw-r--r--compiler/optimizing/code_generator_arm.cc31
-rw-r--r--compiler/optimizing/code_generator_arm.h6
-rw-r--r--compiler/optimizing/code_generator_arm64.cc94
-rw-r--r--compiler/optimizing/code_generator_arm64.h8
-rw-r--r--compiler/optimizing/code_generator_x86.cc31
-rw-r--r--compiler/optimizing/code_generator_x86.h6
-rw-r--r--compiler/optimizing/code_generator_x86_64.cc39
-rw-r--r--compiler/optimizing/code_generator_x86_64.h6
-rw-r--r--compiler/optimizing/inliner.cc12
-rw-r--r--compiler/optimizing/inliner.h2
-rw-r--r--compiler/optimizing/intrinsics_arm.cc2
-rw-r--r--compiler/optimizing/intrinsics_arm64.cc2
-rw-r--r--compiler/optimizing/intrinsics_x86.cc2
-rw-r--r--compiler/optimizing/intrinsics_x86_64.cc2
-rw-r--r--compiler/optimizing/nodes.h4
-rw-r--r--compiler/optimizing/optimizing_cfi_test.cc2
-rw-r--r--compiler/optimizing/optimizing_cfi_test_expected.inc47
-rw-r--r--compiler/optimizing/optimizing_compiler.cc7
-rw-r--r--compiler/optimizing/optimizing_unit_test.h3
-rw-r--r--compiler/optimizing/reference_type_propagation.cc2
-rw-r--r--compiler/optimizing/register_allocator.cc4
-rw-r--r--compiler/utils/arm/assembler_arm.cc10
-rw-r--r--compiler/utils/arm/assembler_arm.h5
-rw-r--r--compiler/utils/arm64/assembler_arm64.cc28
-rw-r--r--compiler/utils/arm64/assembler_arm64.h5
-rw-r--r--compiler/utils/assembler.h6
-rw-r--r--compiler/utils/dex_cache_arrays_layout-inl.h13
-rw-r--r--compiler/utils/mips/assembler_mips.cc6
-rw-r--r--compiler/utils/mips/assembler_mips.h5
-rw-r--r--compiler/utils/mips64/assembler_mips64.cc11
-rw-r--r--compiler/utils/mips64/assembler_mips64.h5
-rw-r--r--compiler/utils/x86/assembler_x86.cc21
-rw-r--r--compiler/utils/x86/assembler_x86.h5
-rw-r--r--compiler/utils/x86_64/assembler_x86_64.cc22
-rw-r--r--compiler/utils/x86_64/assembler_x86_64.h3
-rw-r--r--compiler/utils/x86_64/assembler_x86_64_test.cc2
97 files changed, 1208 insertions, 934 deletions
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 5a9e04f..0a1e2e3 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -17,6 +17,8 @@
#include "common_compiler_test.h"
#include "arch/instruction_set_features.h"
+#include "art_field-inl.h"
+#include "art_method.h"
#include "class_linker.h"
#include "compiled_method.h"
#include "dex/pass_manager.h"
@@ -26,7 +28,8 @@
#include "driver/compiler_driver.h"
#include "driver/compiler_options.h"
#include "interpreter/interpreter.h"
-#include "mirror/art_method.h"
+#include "mirror/class_loader.h"
+#include "mirror/class-inl.h"
#include "mirror/dex_cache.h"
#include "mirror/object-inl.h"
#include "scoped_thread_state_change.h"
@@ -38,7 +41,7 @@ namespace art {
CommonCompilerTest::CommonCompilerTest() {}
CommonCompilerTest::~CommonCompilerTest() {}
-void CommonCompilerTest::MakeExecutable(mirror::ArtMethod* method) {
+void CommonCompilerTest::MakeExecutable(ArtMethod* method) {
CHECK(method != nullptr);
const CompiledMethod* compiled_method = nullptr;
@@ -132,11 +135,12 @@ void CommonCompilerTest::MakeExecutable(mirror::ClassLoader* class_loader, const
Handle<mirror::ClassLoader> loader(hs.NewHandle(class_loader));
mirror::Class* klass = class_linker_->FindClass(self, class_descriptor.c_str(), loader);
CHECK(klass != nullptr) << "Class not found " << class_name;
- for (size_t i = 0; i < klass->NumDirectMethods(); i++) {
- MakeExecutable(klass->GetDirectMethod(i));
+ size_t pointer_size = class_linker_->GetImagePointerSize();
+ for (auto& m : klass->GetDirectMethods(pointer_size)) {
+ MakeExecutable(&m);
}
- for (size_t i = 0; i < klass->NumVirtualMethods(); i++) {
- MakeExecutable(klass->GetVirtualMethod(i));
+ for (auto& m : klass->GetVirtualMethods(pointer_size)) {
+ MakeExecutable(&m);
}
}
@@ -225,15 +229,16 @@ void CommonCompilerTest::CompileClass(mirror::ClassLoader* class_loader, const c
Handle<mirror::ClassLoader> loader(hs.NewHandle(class_loader));
mirror::Class* klass = class_linker_->FindClass(self, class_descriptor.c_str(), loader);
CHECK(klass != nullptr) << "Class not found " << class_name;
- for (size_t i = 0; i < klass->NumDirectMethods(); i++) {
- CompileMethod(klass->GetDirectMethod(i));
+ auto pointer_size = class_linker_->GetImagePointerSize();
+ for (auto& m : klass->GetDirectMethods(pointer_size)) {
+ CompileMethod(&m);
}
- for (size_t i = 0; i < klass->NumVirtualMethods(); i++) {
- CompileMethod(klass->GetVirtualMethod(i));
+ for (auto& m : klass->GetVirtualMethods(pointer_size)) {
+ CompileMethod(&m);
}
}
-void CommonCompilerTest::CompileMethod(mirror::ArtMethod* method) {
+void CommonCompilerTest::CompileMethod(ArtMethod* method) {
CHECK(method != nullptr);
TimingLogger timings("CommonTest::CompileMethod", false, false);
TimingLogger::ScopedTiming t(__FUNCTION__, &timings);
@@ -249,7 +254,8 @@ void CommonCompilerTest::CompileDirectMethod(Handle<mirror::ClassLoader> class_l
Thread* self = Thread::Current();
mirror::Class* klass = class_linker_->FindClass(self, class_descriptor.c_str(), class_loader);
CHECK(klass != nullptr) << "Class not found " << class_name;
- mirror::ArtMethod* method = klass->FindDirectMethod(method_name, signature);
+ auto pointer_size = class_linker_->GetImagePointerSize();
+ ArtMethod* method = klass->FindDirectMethod(method_name, signature, pointer_size);
CHECK(method != nullptr) << "Direct method not found: "
<< class_name << "." << method_name << signature;
CompileMethod(method);
@@ -262,7 +268,8 @@ void CommonCompilerTest::CompileVirtualMethod(Handle<mirror::ClassLoader> class_
Thread* self = Thread::Current();
mirror::Class* klass = class_linker_->FindClass(self, class_descriptor.c_str(), class_loader);
CHECK(klass != nullptr) << "Class not found " << class_name;
- mirror::ArtMethod* method = klass->FindVirtualMethod(method_name, signature);
+ auto pointer_size = class_linker_->GetImagePointerSize();
+ ArtMethod* method = klass->FindVirtualMethod(method_name, signature, pointer_size);
CHECK(method != nullptr) << "Virtual method not found: "
<< class_name << "." << method_name << signature;
CompileMethod(method);
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index 8d80a2d..769319b 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -45,7 +45,7 @@ class CommonCompilerTest : public CommonRuntimeTest {
// Create an OatMethod based on pointers (for unit tests).
OatFile::OatMethod CreateOatMethod(const void* code);
- void MakeExecutable(mirror::ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void MakeExecutable(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static void MakeExecutable(const void* code_start, size_t code_length);
@@ -74,7 +74,7 @@ class CommonCompilerTest : public CommonRuntimeTest {
void CompileClass(mirror::ClassLoader* class_loader, const char* class_name)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void CompileMethod(mirror::ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void CompileMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void CompileDirectMethod(Handle<mirror::ClassLoader> class_loader, const char* class_name,
const char* method_name, const char* signature)
diff --git a/compiler/compiler.h b/compiler/compiler.h
index 94b0fe3..e5d1aff 100644
--- a/compiler/compiler.h
+++ b/compiler/compiler.h
@@ -22,16 +22,13 @@
namespace art {
+class ArtMethod;
class Backend;
struct CompilationUnit;
class CompilerDriver;
class CompiledMethod;
class OatWriter;
-namespace mirror {
- class ArtMethod;
-}
-
class Compiler {
public:
enum Kind {
@@ -60,7 +57,7 @@ class Compiler {
uint32_t method_idx,
const DexFile& dex_file) const = 0;
- virtual uintptr_t GetEntryPointOf(mirror::ArtMethod* method) const
+ virtual uintptr_t GetEntryPointOf(ArtMethod* method) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
uint64_t GetMaximumCompilationTimeBeforeWarning() const {
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index d1ddfda..bd59046 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -15,13 +15,13 @@
*/
#include "art_field-inl.h"
+#include "art_method-inl.h"
#include "base/logging.h"
#include "base/mutex.h"
#include "dex_file-inl.h"
#include "dex_instruction-inl.h"
#include "driver/compiler_driver.h"
#include "driver/dex_compilation_unit.h"
-#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache.h"
#include "thread-inl.h"
diff --git a/compiler/dex/mir_method_info.cc b/compiler/dex/mir_method_info.cc
index 94be1fd..be913fe 100644
--- a/compiler/dex/mir_method_info.cc
+++ b/compiler/dex/mir_method_info.cc
@@ -83,7 +83,7 @@ void MirMethodLoweringInfo::Resolve(CompilerDriver* compiler_driver,
MethodReference devirt_ref(it->target_dex_file_, it->target_method_idx_);
MethodReference* devirt_target = (it->target_dex_file_ != nullptr) ? &devirt_ref : nullptr;
InvokeType invoke_type = it->GetInvokeType();
- mirror::ArtMethod* resolved_method = nullptr;
+ ArtMethod* resolved_method = nullptr;
bool string_init = false;
if (default_inliner->IsStringInitMethodIndex(it->MethodIndex())) {
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 7679db8..7b1ec39 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -249,7 +249,7 @@ int MIRGraph::GetSSAUseCount(int s_reg) {
size_t MIRGraph::GetNumBytesForSpecialTemps() const {
// This logic is written with assumption that Method* is only special temp.
DCHECK_EQ(max_available_special_compiler_temps_, 1u);
- return sizeof(StackReference<mirror::ArtMethod>);
+ return InstructionSetPointerSize(cu_->instruction_set);
}
size_t MIRGraph::GetNumAvailableVRTemps() {
@@ -316,6 +316,7 @@ CompilerTemp* MIRGraph::GetNewCompilerTemp(CompilerTempType ct_type, bool wide)
// The vreg is always the first special temp for method ptr.
compiler_temp->v_reg = GetFirstSpecialTempVR();
+ CHECK(reg_location_ == nullptr);
} else if (ct_type == kCompilerTempBackend) {
requested_backend_temp_ = true;
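The GetNumBytesForSpecialTemps() hunk above is the same idea at the MIR level: the Method* special temp now occupies one target-pointer-sized slot rather than a fixed 4-byte compressed reference. A hedged sketch of the helper it now calls (the real InstructionSetPointerSize lives in the runtime; the enum values here are stand-ins):

#include <cstddef>

enum class InstructionSet { kArm, kThumb2, kArm64, kX86, kX86_64, kMips, kMips64 };

constexpr size_t InstructionSetPointerSize(InstructionSet isa) {
  switch (isa) {
    case InstructionSet::kArm64:
    case InstructionSet::kX86_64:
    case InstructionSet::kMips64:
      return 8;  // the Method* temp needs a 64-bit slot
    default:
      return 4;  // 32-bit targets keep the 4-byte slot
  }
}

static_assert(InstructionSetPointerSize(InstructionSet::kArm64) == 8, "wide Method* slot");
static_assert(InstructionSetPointerSize(InstructionSet::kArm) == 4, "narrow Method* slot");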
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index 822ea21..981ab2c 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -19,6 +19,7 @@
#include "codegen_arm.h"
#include "arm_lir.h"
+#include "art_method.h"
#include "base/bit_utils.h"
#include "base/logging.h"
#include "dex/mir_graph.h"
@@ -27,7 +28,6 @@
#include "driver/compiler_driver.h"
#include "driver/compiler_options.h"
#include "gc/accounting/card_table.h"
-#include "mirror/art_method.h"
#include "mirror/object_array-inl.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "utils/dex_cache_arrays_layout-inl.h"
@@ -637,7 +637,7 @@ int ArmMir2Lir::ArmNextSDCallInsn(CompilationUnit* cu, CallInfo* info,
if (direct_code == 0) {
// kInvokeTgt := arg0_ref->entrypoint
cg->LoadWordDisp(arg0_ref,
- mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ ArtMethod::EntryPointFromQuickCompiledCodeOffset(
kArmPointerSize).Int32Value(), cg->TargetPtrReg(kInvokeTgt));
}
break;
@@ -678,7 +678,7 @@ int ArmMir2Lir::ArmNextSDCallInsn(CompilationUnit* cu, CallInfo* info,
case 1: // Get method->dex_cache_resolved_methods_
if (!use_pc_rel) {
cg->LoadRefDisp(arg0_ref,
- mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(),
+ ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(),
arg0_ref,
kNotVolatile);
}
@@ -708,14 +708,14 @@ int ArmMir2Lir::ArmNextSDCallInsn(CompilationUnit* cu, CallInfo* info,
kNotVolatile);
} else {
size_t offset = cg->dex_cache_arrays_layout_.MethodOffset(target_method.dex_method_index);
- cg->OpPcRelDexCacheArrayLoad(cu->dex_file, offset, arg0_ref);
+ cg->OpPcRelDexCacheArrayLoad(cu->dex_file, offset, arg0_ref, false);
}
break;
case 3: // Grab the code from the method*
if (direct_code == 0) {
// kInvokeTgt := arg0_ref->entrypoint
cg->LoadWordDisp(arg0_ref,
- mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ ArtMethod::EntryPointFromQuickCompiledCodeOffset(
kArmPointerSize).Int32Value(), cg->TargetPtrReg(kInvokeTgt));
}
break;
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
index 83b27df..b94e707 100644
--- a/compiler/dex/quick/arm/codegen_arm.h
+++ b/compiler/dex/quick/arm/codegen_arm.h
@@ -83,7 +83,8 @@ class ArmMir2Lir FINAL : public Mir2Lir {
void UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) OVERRIDE;
bool CanUseOpPcRelDexCacheArrayLoad() const OVERRIDE;
- void OpPcRelDexCacheArrayLoad(const DexFile* dex_file, int offset, RegStorage r_dest) OVERRIDE;
+ void OpPcRelDexCacheArrayLoad(const DexFile* dex_file, int offset, RegStorage r_dest,
+ bool wide) OVERRIDE;
// Required for target - register utilities.
RegStorage TargetReg(SpecialTargetRegister reg) OVERRIDE;
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index 7de8e55..6d30e72 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -1107,7 +1107,9 @@ void ArmMir2Lir::OpPcRelDexCacheArrayAddr(const DexFile* dex_file, int offset, R
dex_cache_access_insns_.push_back(movt);
}
-void ArmMir2Lir::OpPcRelDexCacheArrayLoad(const DexFile* dex_file, int offset, RegStorage r_dest) {
+void ArmMir2Lir::OpPcRelDexCacheArrayLoad(const DexFile* dex_file, int offset, RegStorage r_dest,
+ bool wide) {
+ DCHECK(!wide) << "Unsupported";
if (dex_cache_arrays_base_reg_.Valid()) {
LoadRefDisp(dex_cache_arrays_base_reg_, offset - dex_cache_arrays_min_offset_,
r_dest, kNotVolatile);
diff --git a/compiler/dex/quick/arm64/arm64_lir.h b/compiler/dex/quick/arm64/arm64_lir.h
index 5bf77aa..c530a8b 100644
--- a/compiler/dex/quick/arm64/arm64_lir.h
+++ b/compiler/dex/quick/arm64/arm64_lir.h
@@ -71,7 +71,7 @@ namespace art {
* | IN[ins-1] | {Note: resides in caller's frame}
* | . |
* | IN[0] |
- * | caller's method (StackReference<ArtMethod>)| {This is a compressed (4-bytes) reference}
+ * | caller's method ArtMethod* | {Pointer sized reference}
* +============================================+ {Note: start of callee's frame}
* | spill region | {variable sized - will include lr if non-leaf}
* +--------------------------------------------+
@@ -90,7 +90,7 @@ namespace art {
* | OUT[outs-2] |
* | . |
* | OUT[0] |
- * | current method (StackReference<ArtMethod>) | <<== sp w/ 16-byte alignment
+ * | current method ArtMethod* | <<== sp w/ 16-byte alignment
* +============================================+
*/
diff --git a/compiler/dex/quick/arm64/call_arm64.cc b/compiler/dex/quick/arm64/call_arm64.cc
index e49e40d..83a6aff 100644
--- a/compiler/dex/quick/arm64/call_arm64.cc
+++ b/compiler/dex/quick/arm64/call_arm64.cc
@@ -19,6 +19,7 @@
#include "codegen_arm64.h"
#include "arm64_lir.h"
+#include "art_method.h"
#include "base/logging.h"
#include "dex/mir_graph.h"
#include "dex/quick/dex_file_to_method_inliner_map.h"
@@ -27,7 +28,6 @@
#include "driver/compiler_options.h"
#include "gc/accounting/card_table.h"
#include "entrypoints/quick/quick_entrypoints.h"
-#include "mirror/art_method.h"
#include "mirror/object_array-inl.h"
#include "utils/dex_cache_arrays_layout-inl.h"
@@ -456,23 +456,22 @@ static bool Arm64UseRelativeCall(CompilationUnit* cu, const MethodReference& tar
*/
int Arm64Mir2Lir::Arm64NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
int state, const MethodReference& target_method,
- uint32_t unused_idx,
+ uint32_t unused_idx ATTRIBUTE_UNUSED,
uintptr_t direct_code, uintptr_t direct_method,
InvokeType type) {
- UNUSED(info, unused_idx);
Arm64Mir2Lir* cg = static_cast<Arm64Mir2Lir*>(cu->cg.get());
if (info->string_init_offset != 0) {
RegStorage arg0_ref = cg->TargetReg(kArg0, kRef);
switch (state) {
case 0: { // Grab target method* from thread pointer
- cg->LoadRefDisp(rs_xSELF, info->string_init_offset, arg0_ref, kNotVolatile);
+ cg->LoadWordDisp(rs_xSELF, info->string_init_offset, arg0_ref);
break;
}
case 1: // Grab the code from the method*
if (direct_code == 0) {
// kInvokeTgt := arg0_ref->entrypoint
cg->LoadWordDisp(arg0_ref,
- mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ ArtMethod::EntryPointFromQuickCompiledCodeOffset(
kArm64PointerSize).Int32Value(), cg->TargetPtrReg(kInvokeTgt));
}
break;
@@ -500,7 +499,7 @@ int Arm64Mir2Lir::Arm64NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
}
} else {
bool use_pc_rel = cg->CanUseOpPcRelDexCacheArrayLoad();
- RegStorage arg0_ref = cg->TargetReg(kArg0, kRef);
+ RegStorage arg0_ref = cg->TargetPtrReg(kArg0);
switch (state) {
case 0: // Get the current Method* [sets kArg0]
// TUNING: we can save a reg copy if Method* has been promoted.
@@ -513,7 +512,7 @@ int Arm64Mir2Lir::Arm64NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
case 1: // Get method->dex_cache_resolved_methods_
if (!use_pc_rel) {
cg->LoadRefDisp(arg0_ref,
- mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(),
+ ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(),
arg0_ref,
kNotVolatile);
}
@@ -536,21 +535,19 @@ int Arm64Mir2Lir::Arm64NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
case 2: // Grab target method*
CHECK_EQ(cu->dex_file, target_method.dex_file);
if (!use_pc_rel) {
- cg->LoadRefDisp(arg0_ref,
- mirror::ObjectArray<mirror::Object>::OffsetOfElement(
- target_method.dex_method_index).Int32Value(),
- arg0_ref,
- kNotVolatile);
+ cg->LoadWordDisp(arg0_ref,
+ mirror::Array::DataOffset(kArm64PointerSize).Uint32Value() +
+ target_method.dex_method_index * kArm64PointerSize, arg0_ref);
} else {
size_t offset = cg->dex_cache_arrays_layout_.MethodOffset(target_method.dex_method_index);
- cg->OpPcRelDexCacheArrayLoad(cu->dex_file, offset, arg0_ref);
+ cg->OpPcRelDexCacheArrayLoad(cu->dex_file, offset, arg0_ref, true);
}
break;
case 3: // Grab the code from the method*
if (direct_code == 0) {
// kInvokeTgt := arg0_ref->entrypoint
cg->LoadWordDisp(arg0_ref,
- mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ ArtMethod::EntryPointFromQuickCompiledCodeOffset(
kArm64PointerSize).Int32Value(), cg->TargetPtrReg(kInvokeTgt));
}
break;
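The "Grab target method*" hunk above swaps an ObjectArray-of-references load for a raw pointer-array load: dex_cache_resolved_methods_ now holds ArtMethod* elements, so the element address is the array's data offset plus index times pointer size. A rough self-contained model of that arithmetic (the 12-byte header and alignment rule mimic mirror::Array::DataOffset but are assumptions here):

#include <cstdint>

// Rough model: the array payload starts after a header (class ref +
// monitor + length = 12 bytes), rounded up to the element size.
constexpr uint32_t ArrayDataOffset(uint32_t component_size) {
  return (12u + component_size - 1u) & ~(component_size - 1u);
}

constexpr uint32_t MethodSlotOffset(uint32_t dex_method_index, uint32_t ptr_size) {
  return ArrayDataOffset(ptr_size) + dex_method_index * ptr_size;
}

// 64-bit target: first slot at offset 16, stride 8.
static_assert(MethodSlotOffset(0, 8) == 16, "aligned 8-byte slots");
static_assert(MethodSlotOffset(3, 8) == 40, "index scales by pointer size");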
diff --git a/compiler/dex/quick/arm64/codegen_arm64.h b/compiler/dex/quick/arm64/codegen_arm64.h
index 8184f02..ca2e012 100644
--- a/compiler/dex/quick/arm64/codegen_arm64.h
+++ b/compiler/dex/quick/arm64/codegen_arm64.h
@@ -79,7 +79,8 @@ class Arm64Mir2Lir FINAL : public Mir2Lir {
void UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) OVERRIDE;
bool CanUseOpPcRelDexCacheArrayLoad() const OVERRIDE;
- void OpPcRelDexCacheArrayLoad(const DexFile* dex_file, int offset, RegStorage r_dest) OVERRIDE;
+ void OpPcRelDexCacheArrayLoad(const DexFile* dex_file, int offset, RegStorage r_dest, bool wide)
+ OVERRIDE;
LIR* OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
int offset, int check_value, LIR* target, LIR** compare) OVERRIDE;
diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc
index 08aa5d2..31cf667 100644
--- a/compiler/dex/quick/arm64/int_arm64.cc
+++ b/compiler/dex/quick/arm64/int_arm64.cc
@@ -947,14 +947,17 @@ bool Arm64Mir2Lir::CanUseOpPcRelDexCacheArrayLoad() const {
return dex_cache_arrays_layout_.Valid();
}
-void Arm64Mir2Lir::OpPcRelDexCacheArrayLoad(const DexFile* dex_file, int offset,
- RegStorage r_dest) {
+void Arm64Mir2Lir::OpPcRelDexCacheArrayLoad(const DexFile* dex_file, int offset, RegStorage r_dest,
+ bool wide) {
LIR* adrp = NewLIR2(kA64Adrp2xd, r_dest.GetReg(), 0);
adrp->operands[2] = WrapPointer(dex_file);
adrp->operands[3] = offset;
adrp->operands[4] = WrapPointer(adrp);
dex_cache_access_insns_.push_back(adrp);
- LIR* ldr = LoadBaseDisp(r_dest, 0, r_dest, kReference, kNotVolatile);
+ if (wide) {
+ DCHECK(r_dest.Is64Bit());
+ }
+ LIR* ldr = LoadBaseDisp(r_dest, 0, r_dest, wide ? k64 : kReference, kNotVolatile);
ldr->operands[4] = adrp->operands[4];
ldr->flags.fixup = kFixupLabel;
dex_cache_access_insns_.push_back(ldr);
diff --git a/compiler/dex/quick/arm64/target_arm64.cc b/compiler/dex/quick/arm64/target_arm64.cc
index fc32ecd..d5de18d 100644
--- a/compiler/dex/quick/arm64/target_arm64.cc
+++ b/compiler/dex/quick/arm64/target_arm64.cc
@@ -859,7 +859,8 @@ void Arm64Mir2Lir::InstallLiteralPools() {
// PC-relative references to dex cache arrays.
for (LIR* p : dex_cache_access_insns_) {
- DCHECK(p->opcode == kA64Adrp2xd || p->opcode == kA64Ldr3rXD);
+ auto non_wide = UNWIDE(p->opcode); // May be a wide load for ArtMethod*.
+ DCHECK(non_wide == kA64Adrp2xd || non_wide == kA64Ldr3rXD) << p->opcode << " " << non_wide;
const LIR* adrp = UnwrapPointer<LIR>(p->operands[4]);
DCHECK_EQ(adrp->opcode, kA64Adrp2xd);
const DexFile* dex_file = UnwrapPointer<DexFile>(adrp->operands[2]);
@@ -895,8 +896,7 @@ void Arm64Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir)
rl_src[0] = mir_graph_->GetSrc(mir, 0);
rl_src[1] = mir_graph_->GetSrc(mir, 1);
rl_src[2]= mir_graph_->GetSrc(mir, 2);
- GenMaddMsubInt(rl_dest, rl_src[0], rl_src[1], rl_src[2],
- (opcode == kMirOpMsubInt) ? true : false);
+ GenMaddMsubInt(rl_dest, rl_src[0], rl_src[1], rl_src[2], opcode == kMirOpMsubInt);
break;
case kMirOpMaddLong:
case kMirOpMsubLong:
@@ -904,8 +904,7 @@ void Arm64Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir)
rl_src[0] = mir_graph_->GetSrcWide(mir, 0);
rl_src[1] = mir_graph_->GetSrcWide(mir, 2);
rl_src[2] = mir_graph_->GetSrcWide(mir, 4);
- GenMaddMsubLong(rl_dest, rl_src[0], rl_src[1], rl_src[2],
- (opcode == kMirOpMsubLong) ? true : false);
+ GenMaddMsubLong(rl_dest, rl_src[0], rl_src[1], rl_src[2], opcode == kMirOpMsubLong);
break;
default:
LOG(FATAL) << "Unexpected opcode: " << static_cast<int>(opcode);
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 86bb69d..f4bf31f 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -1298,8 +1298,8 @@ void Mir2Lir::LoadMethodAddress(const MethodReference& target_method, InvokeType
// resolve these invokes to the same method, so we don't care which one we record here.
data_target->operands[2] = type;
}
- // Loads an ArtMethod pointer, which is a reference as it lives in the heap.
- OpPcRelLoad(TargetReg(symbolic_reg, kRef), data_target);
+ // Loads an ArtMethod pointer, which is not a reference.
+ OpPcRelLoad(TargetPtrReg(symbolic_reg), data_target);
DCHECK_NE(cu_->instruction_set, kMips) << reinterpret_cast<void*>(data_target);
DCHECK_NE(cu_->instruction_set, kMips64) << reinterpret_cast<void*>(data_target);
}
@@ -1322,7 +1322,8 @@ bool Mir2Lir::CanUseOpPcRelDexCacheArrayLoad() const {
void Mir2Lir::OpPcRelDexCacheArrayLoad(const DexFile* dex_file ATTRIBUTE_UNUSED,
int offset ATTRIBUTE_UNUSED,
- RegStorage r_dest ATTRIBUTE_UNUSED) {
+ RegStorage r_dest ATTRIBUTE_UNUSED,
+ bool wide ATTRIBUTE_UNUSED) {
LOG(FATAL) << "No generic implementation.";
UNREACHABLE();
}
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 63f83f9..af10817 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -97,11 +97,11 @@ RegStorage Mir2Lir::GenGetOtherTypeForSgetSput(const MirSFieldLoweringInfo& fiel
LockTemp(r_base);
if (CanUseOpPcRelDexCacheArrayLoad()) {
uint32_t offset = dex_cache_arrays_layout_.TypeOffset(field_info.StorageIndex());
- OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, r_base);
+ OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, r_base, false);
} else {
// Using fixed register to sync with possible call to runtime support.
RegStorage r_method = LoadCurrMethodWithHint(r_base);
- LoadRefDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base,
+ LoadRefDisp(r_method, ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base,
kNotVolatile);
int32_t offset_of_field = ObjArray::OffsetOfElement(field_info.StorageIndex()).Int32Value();
LoadRefDisp(r_base, offset_of_field, r_base, kNotVolatile);
@@ -693,7 +693,7 @@ void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, OpSize size) {
// Fast path, static storage base is this method's class
r_base = AllocTempRef();
RegStorage r_method = LoadCurrMethodWithHint(r_base);
- LoadRefDisp(r_method, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base,
+ LoadRefDisp(r_method, ArtMethod::DeclaringClassOffset().Int32Value(), r_base,
kNotVolatile);
} else {
// Medium path, static storage base in a different class which requires checks that the other
@@ -771,7 +771,7 @@ void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest, OpSize size, Primitive::Typ
// Fast path, static storage base is this method's class
r_base = AllocTempRef();
RegStorage r_method = LoadCurrMethodWithHint(r_base);
- LoadRefDisp(r_method, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base,
+ LoadRefDisp(r_method, ArtMethod::DeclaringClassOffset().Int32Value(), r_base,
kNotVolatile);
} else {
// Medium path, static storage base in a different class which requires checks that the other
@@ -1031,10 +1031,10 @@ void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) {
// We don't need access checks, load type from dex cache
if (CanUseOpPcRelDexCacheArrayLoad()) {
size_t offset = dex_cache_arrays_layout_.TypeOffset(type_idx);
- OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, rl_result.reg);
+ OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, rl_result.reg, false);
} else {
int32_t dex_cache_offset =
- mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value();
+ ArtMethod::DexCacheResolvedTypesOffset().Int32Value();
RegStorage res_reg = AllocTempRef();
RegStorage r_method = LoadCurrMethodWithHint(res_reg);
LoadRefDisp(r_method, dex_cache_offset, res_reg, kNotVolatile);
@@ -1066,13 +1066,12 @@ void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) {
RegStorage ret0 = TargetReg(kRet0, kRef);
if (CanUseOpPcRelDexCacheArrayLoad()) {
size_t offset = dex_cache_arrays_layout_.StringOffset(string_idx);
- OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, ret0);
+ OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, ret0, false);
} else {
// Method to declaring class.
RegStorage arg0 = TargetReg(kArg0, kRef);
RegStorage r_method = LoadCurrMethodWithHint(arg0);
- LoadRefDisp(r_method, mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
- arg0, kNotVolatile);
+ LoadRefDisp(r_method, ArtMethod::DeclaringClassOffset().Int32Value(), arg0, kNotVolatile);
// Declaring class to dex cache strings.
LoadRefDisp(arg0, mirror::Class::DexCacheStringsOffset().Int32Value(), arg0, kNotVolatile);
@@ -1086,11 +1085,11 @@ void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) {
RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
if (CanUseOpPcRelDexCacheArrayLoad()) {
size_t offset = dex_cache_arrays_layout_.StringOffset(string_idx);
- OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, rl_result.reg);
+ OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, rl_result.reg, false);
} else {
RegLocation rl_method = LoadCurrMethod();
RegStorage res_reg = AllocTempRef();
- LoadRefDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), res_reg,
+ LoadRefDisp(rl_method.reg, ArtMethod::DeclaringClassOffset().Int32Value(), res_reg,
kNotVolatile);
LoadRefDisp(res_reg, mirror::Class::DexCacheStringsOffset().Int32Value(), res_reg,
kNotVolatile);
@@ -1173,18 +1172,18 @@ void Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, Re
if (use_declaring_class) {
RegStorage r_method = LoadCurrMethodWithHint(check_class);
- LoadRefDisp(r_method, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), check_class,
+ LoadRefDisp(r_method, ArtMethod::DeclaringClassOffset().Int32Value(), check_class,
kNotVolatile);
LoadRefDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class,
kNotVolatile);
} else if (CanUseOpPcRelDexCacheArrayLoad()) {
size_t offset = dex_cache_arrays_layout_.TypeOffset(type_idx);
- OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, check_class);
+ OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, check_class, false);
LoadRefDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class,
kNotVolatile);
} else {
RegStorage r_method = LoadCurrMethodWithHint(check_class);
- LoadRefDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
+ LoadRefDisp(r_method, ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
check_class, kNotVolatile);
LoadRefDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class,
kNotVolatile);
@@ -1232,7 +1231,7 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know
} else if (use_declaring_class) {
RegStorage r_method = LoadCurrMethodWithHint(TargetReg(kArg1, kRef));
LoadValueDirectFixed(rl_src, ref_reg); // kArg0 <= ref
- LoadRefDisp(r_method, mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
+ LoadRefDisp(r_method, ArtMethod::DeclaringClassOffset().Int32Value(),
class_reg, kNotVolatile);
} else {
if (can_assume_type_is_in_dex_cache) {
@@ -1242,11 +1241,11 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know
if (CanUseOpPcRelDexCacheArrayLoad()) {
size_t offset = dex_cache_arrays_layout_.TypeOffset(type_idx);
- OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, class_reg);
+ OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, class_reg, false);
} else {
RegStorage r_method = LoadCurrMethodWithHint(class_reg);
// Load dex cache entry into class_reg (kArg2)
- LoadRefDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
+ LoadRefDisp(r_method, ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
class_reg, kNotVolatile);
int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value();
LoadRefDisp(class_reg, offset_of_type, class_reg, kNotVolatile);
@@ -1367,17 +1366,17 @@ void Mir2Lir::GenCheckCast(int opt_flags, uint32_t insn_idx, uint32_t type_idx,
OpRegCopy(class_reg, TargetReg(kRet0, kRef)); // Align usage with fast path
} else if (use_declaring_class) {
RegStorage method_reg = LoadCurrMethodWithHint(TargetReg(kArg1, kRef));
- LoadRefDisp(method_reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
+ LoadRefDisp(method_reg, ArtMethod::DeclaringClassOffset().Int32Value(),
class_reg, kNotVolatile);
} else {
// Load dex cache entry into class_reg (kArg2)
if (CanUseOpPcRelDexCacheArrayLoad()) {
size_t offset = dex_cache_arrays_layout_.TypeOffset(type_idx);
- OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, class_reg);
+ OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, class_reg, false);
} else {
RegStorage r_method = LoadCurrMethodWithHint(class_reg);
- LoadRefDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
+ LoadRefDisp(r_method, ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
class_reg, kNotVolatile);
int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value();
LoadRefDisp(class_reg, offset_of_type, class_reg, kNotVolatile);
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index ab011fc..1f114cf 100755
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -398,7 +398,7 @@ void Mir2Lir::CallRuntimeHelperRegLocationRegLocationRegLocationRegLocation(
// TODO: Support 64-bit argument registers.
void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
/*
- * Dummy up a RegLocation for the incoming StackReference<mirror::ArtMethod>
+ * Dummy up a RegLocation for the incoming ArtMethod*
* It will attempt to keep kArg0 live (or copy it to home location
* if promoted).
*/
@@ -407,10 +407,15 @@ void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
rl_src.reg = TargetReg(kArg0, kRef);
rl_src.home = false;
MarkLive(rl_src);
- StoreValue(rl_method, rl_src);
+ if (cu_->target64) {
+ DCHECK(rl_method.wide);
+ StoreValueWide(rl_method, rl_src);
+ } else {
+ StoreValue(rl_method, rl_src);
+ }
// If Method* has been promoted, explicitly flush
if (rl_method.location == kLocPhysReg) {
- StoreRefDisp(TargetPtrReg(kSp), 0, rl_src.reg, kNotVolatile);
+ StoreBaseDisp(TargetPtrReg(kSp), 0, rl_src.reg, kWord, kNotVolatile);
}
if (mir_graph_->GetNumOfInVRs() == 0) {
@@ -498,7 +503,7 @@ static void CommonCallCodeLoadClassIntoArg0(const CallInfo* info, Mir2Lir* cg) {
static bool CommonCallCodeLoadCodePointerIntoInvokeTgt(const RegStorage* alt_from,
const CompilationUnit* cu, Mir2Lir* cg) {
if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
- int32_t offset = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ int32_t offset = ArtMethod::EntryPointFromQuickCompiledCodeOffset(
InstructionSetPointerSize(cu->instruction_set)).Int32Value();
// Get the compiled code address [use *alt_from or kArg0, set kInvokeTgt]
cg->LoadWordDisp(alt_from == nullptr ? cg->TargetReg(kArg0, kRef) : *alt_from, offset,
@@ -535,10 +540,12 @@ static int NextVCallInsn(CompilationUnit* cu, CallInfo* info,
break;
case 2: {
// Get this->klass_.embedded_vtable[method_idx] [usr kArg0, set kArg0]
- int32_t offset = mirror::Class::EmbeddedVTableOffset().Uint32Value() +
- method_idx * sizeof(mirror::Class::VTableEntry);
+ const size_t pointer_size = InstructionSetPointerSize(
+ cu->compiler_driver->GetInstructionSet());
+ int32_t offset = mirror::Class::EmbeddedVTableEntryOffset(
+ method_idx, pointer_size).Uint32Value();
// Load target method from embedded vtable to kArg0 [use kArg0, set kArg0]
- cg->LoadRefDisp(cg->TargetReg(kArg0, kRef), offset, cg->TargetReg(kArg0, kRef), kNotVolatile);
+ cg->LoadWordDisp(cg->TargetPtrReg(kArg0), offset, cg->TargetPtrReg(kArg0));
break;
}
case 3:
@@ -580,10 +587,12 @@ static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state,
// Includes a null-check.
break;
case 3: { // Get target method [use kInvokeTgt, set kArg0]
- int32_t offset = mirror::Class::EmbeddedImTableOffset().Uint32Value() +
- (method_idx % mirror::Class::kImtSize) * sizeof(mirror::Class::ImTableEntry);
+ const size_t pointer_size = InstructionSetPointerSize(
+ cu->compiler_driver->GetInstructionSet());
+ int32_t offset = mirror::Class::EmbeddedImTableEntryOffset(
+ method_idx % mirror::Class::kImtSize, pointer_size).Uint32Value();
// Load target method from embedded imtable to kArg0 [use kArg0, set kArg0]
- cg->LoadRefDisp(cg->TargetReg(kArg0, kRef), offset, cg->TargetReg(kArg0, kRef), kNotVolatile);
+ cg->LoadWordDisp(cg->TargetPtrReg(kArg0), offset, cg->TargetPtrReg(kArg0));
break;
}
case 4:
@@ -967,7 +976,7 @@ bool Mir2Lir::GenInlinedReferenceGetReferent(CallInfo* info) {
RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
GenNullCheck(rl_obj.reg, info->opt_flags);
LoadRefDisp(rl_obj.reg, mirror::Reference::ReferentOffset().Int32Value(), rl_result.reg,
- kNotVolatile);
+ kNotVolatile);
MarkPossibleNullPointerException(info->opt_flags);
StoreValue(rl_dest, rl_result);
@@ -1418,7 +1427,7 @@ bool Mir2Lir::GenInlinedCurrentThread(CallInfo* info) {
RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
- if (Is64BitInstructionSet(cu_->instruction_set)) {
+ if (cu_->target64) {
LoadRefDisp(TargetPtrReg(kSelf), Thread::PeerOffset<8>().Int32Value(), rl_result.reg,
kNotVolatile);
} else {
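In the NextVCallInsn and NextInterfaceCallInsn hunks above, the embedded vtable and IMT entries become pointer-sized ArtMethod* slots, so entry offsets are computed from the target pointer size instead of sizeof(mirror::Class::VTableEntry). A hedged sketch of the stride change (kEmbeddedTableStart is a placeholder, not ART's real layout constant):

#include <cstdint>

constexpr uint32_t kEmbeddedTableStart = 0x70;  // placeholder offset into Class

// Before: stride was sizeof(VTableEntry), a 4-byte compressed reference.
constexpr uint32_t OldEntryOffset(uint32_t method_idx) {
  return kEmbeddedTableStart + method_idx * 4u;
}

// After: stride is the target pointer size, since entries are ArtMethod*.
constexpr uint32_t NewEntryOffset(uint32_t method_idx, uint32_t ptr_size) {
  return kEmbeddedTableStart + method_idx * ptr_size;
}

static_assert(OldEntryOffset(2) == 0x78, "4-byte stride");
static_assert(NewEntryOffset(2, 8) == 0x80, "8-byte stride on 64-bit");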
diff --git a/compiler/dex/quick/gen_loadstore.cc b/compiler/dex/quick/gen_loadstore.cc
index 4215e8b..aa95e77 100644
--- a/compiler/dex/quick/gen_loadstore.cc
+++ b/compiler/dex/quick/gen_loadstore.cc
@@ -42,7 +42,7 @@ LIR* Mir2Lir::LoadConstant(RegStorage r_dest, int value) {
* register liveness. That is the responsibility of the caller.
*/
void Mir2Lir::LoadValueDirect(RegLocation rl_src, RegStorage r_dest) {
- rl_src = UpdateLoc(rl_src);
+ rl_src = rl_src.wide ? UpdateLocWide(rl_src) : UpdateLoc(rl_src);
if (rl_src.location == kLocPhysReg) {
OpRegCopy(r_dest, rl_src.reg);
} else if (IsInexpensiveConstant(rl_src)) {
@@ -53,11 +53,15 @@ void Mir2Lir::LoadValueDirect(RegLocation rl_src, RegStorage r_dest) {
DCHECK((rl_src.location == kLocDalvikFrame) ||
(rl_src.location == kLocCompilerTemp));
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
+ OpSize op_size;
if (rl_src.ref) {
- LoadRefDisp(TargetPtrReg(kSp), SRegOffset(rl_src.s_reg_low), r_dest, kNotVolatile);
+ op_size = kReference;
+ } else if (rl_src.wide) {
+ op_size = k64;
} else {
- Load32Disp(TargetPtrReg(kSp), SRegOffset(rl_src.s_reg_low), r_dest);
+ op_size = k32;
}
+ LoadBaseDisp(TargetPtrReg(kSp), SRegOffset(rl_src.s_reg_low), r_dest, op_size, kNotVolatile);
}
}
@@ -337,7 +341,11 @@ void Mir2Lir::StoreFinalValueWide(RegLocation rl_dest, RegLocation rl_src) {
/* Utilities to load the current Method* */
void Mir2Lir::LoadCurrMethodDirect(RegStorage r_tgt) {
- LoadValueDirectFixed(mir_graph_->GetMethodLoc(), r_tgt);
+ if (GetCompilationUnit()->target64) {
+ LoadValueDirectWideFixed(mir_graph_->GetMethodLoc(), r_tgt);
+ } else {
+ LoadValueDirectFixed(mir_graph_->GetMethodLoc(), r_tgt);
+ }
}
RegStorage Mir2Lir::LoadCurrMethodWithHint(RegStorage r_hint) {
@@ -355,7 +363,9 @@ RegStorage Mir2Lir::LoadCurrMethodWithHint(RegStorage r_hint) {
}
RegLocation Mir2Lir::LoadCurrMethod() {
- return LoadValue(mir_graph_->GetMethodLoc(), kRefReg);
+ return GetCompilationUnit()->target64 ?
+ LoadValueWide(mir_graph_->GetMethodLoc(), kCoreReg) :
+ LoadValue(mir_graph_->GetMethodLoc(), kRefReg);
}
RegLocation Mir2Lir::ForceTemp(RegLocation loc) {
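The LoadValueDirect and LoadCurrMethod* hunks above all branch on the same predicate: a ref location gets a compressed-reference load, a wide location (the Method* temp on 64-bit targets) a 64-bit load, anything else 32 bits. A compact model of that selection (RegLocation here is a two-field stand-in for the real Quick struct):

enum OpSize { k32, k64, kReference };

struct RegLocation {
  bool ref;   // compressed heap reference
  bool wide;  // 64-bit value, e.g. the ArtMethod* temp on 64-bit targets
};

OpSize LoadSizeFor(const RegLocation& loc) {
  if (loc.ref) {
    return kReference;  // decompressing reference load
  }
  return loc.wide ? k64 : k32;
}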
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index 3d25384..da12d8e 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -18,6 +18,7 @@
#include "codegen_mips.h"
+#include "art_method.h"
#include "base/logging.h"
#include "dex/mir_graph.h"
#include "dex/quick/dex_file_to_method_inliner_map.h"
@@ -26,7 +27,6 @@
#include "entrypoints/quick/quick_entrypoints.h"
#include "gc/accounting/card_table.h"
#include "mips_lir.h"
-#include "mirror/art_method.h"
#include "mirror/object_array-inl.h"
namespace art {
@@ -407,12 +407,12 @@ static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info, int state,
RegStorage arg0_ref = cg->TargetReg(kArg0, kRef);
switch (state) {
case 0: { // Grab target method* from thread pointer
- cg->LoadRefDisp(cg->TargetPtrReg(kSelf), info->string_init_offset, arg0_ref, kNotVolatile);
+ cg->LoadWordDisp(cg->TargetPtrReg(kSelf), info->string_init_offset, arg0_ref);
break;
}
case 1: // Grab the code from the method*
if (direct_code == 0) {
- int32_t offset = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ int32_t offset = ArtMethod::EntryPointFromQuickCompiledCodeOffset(
InstructionSetPointerSize(cu->instruction_set)).Int32Value();
cg->LoadWordDisp(arg0_ref, offset, cg->TargetPtrReg(kInvokeTgt));
}
@@ -454,7 +454,7 @@ static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info, int state,
break;
case 1: // Get method->dex_cache_resolved_methods_
cg->LoadRefDisp(arg0_ref,
- mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(),
+ ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(),
arg0_ref,
kNotVolatile);
// Set up direct code if known.
@@ -471,17 +471,18 @@ static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info, int state,
}
}
break;
- case 2: // Grab target method*
+ case 2: {
+ // Grab target method*
CHECK_EQ(cu->dex_file, target_method.dex_file);
- cg->LoadRefDisp(arg0_ref,
- mirror::ObjectArray<mirror::Object>::
- OffsetOfElement(target_method.dex_method_index).Int32Value(),
- arg0_ref,
- kNotVolatile);
+ const size_t pointer_size = GetInstructionSetPointerSize(cu->instruction_set);
+ cg->LoadWordDisp(arg0_ref,
+ mirror::Array::DataOffset(pointer_size).Uint32Value() +
+ target_method.dex_method_index * pointer_size, arg0_ref);
break;
+ }
case 3: // Grab the code from the method*
if (direct_code == 0) {
- int32_t offset = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ int32_t offset = ArtMethod::EntryPointFromQuickCompiledCodeOffset(
InstructionSetPointerSize(cu->instruction_set)).Int32Value();
// Get the compiled code address [use *alt_from or kArg0, set kInvokeTgt]
cg->LoadWordDisp(arg0_ref, offset, cg->TargetPtrReg(kInvokeTgt));
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index e3e87ec..7ca03cf 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -1232,6 +1232,10 @@ bool Mir2Lir::MethodBlockCodeGen(BasicBlock* bb) {
ResetRegPool();
int start_vreg = mir_graph_->GetFirstInVR();
AppendLIR(NewLIR0(kPseudoPrologueBegin));
+ DCHECK_EQ(cu_->target64, Is64BitInstructionSet(cu_->instruction_set));
+ if (cu_->target64) {
+ DCHECK(mir_graph_->GetMethodLoc().wide);
+ }
GenEntrySequence(&mir_graph_->reg_location_[start_vreg], mir_graph_->GetMethodLoc());
AppendLIR(NewLIR0(kPseudoPrologueEnd));
DCHECK_EQ(cfi_.GetCurrentCFAOffset(), frame_size_);
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index d54616f..73787e9 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -982,12 +982,11 @@ class Mir2Lir {
}
// Load a reference at base + displacement and decompress into register.
LIR* LoadRefDisp(RegStorage r_base, int displacement, RegStorage r_dest,
- VolatileKind is_volatile) {
+ VolatileKind is_volatile) {
return LoadBaseDisp(r_base, displacement, r_dest, kReference, is_volatile);
}
// Load a reference at base + index and decompress into register.
- LIR* LoadRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
- int scale) {
+ LIR* LoadRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale) {
return LoadBaseIndexed(r_base, r_index, r_dest, scale, kReference);
}
// Load Dalvik value with 32-bit memory storage. If compressed object reference, decompress.
@@ -1008,12 +1007,11 @@ class Mir2Lir {
}
// Store an uncompressed reference into a compressed 32-bit container.
LIR* StoreRefDisp(RegStorage r_base, int displacement, RegStorage r_src,
- VolatileKind is_volatile) {
+ VolatileKind is_volatile) {
return StoreBaseDisp(r_base, displacement, r_src, kReference, is_volatile);
}
// Store an uncompressed reference into a compressed 32-bit container by index.
- LIR* StoreRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
- int scale) {
+ LIR* StoreRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale) {
return StoreBaseIndexed(r_base, r_index, r_src, scale, kReference);
}
// Store 32 bits, regardless of target.
@@ -1117,8 +1115,10 @@ class Mir2Lir {
* @param dex_file the dex file associated with the target dex cache.
* @param offset the offset of the element in the fixed dex cache arrays' layout.
* @param r_dest the register where to load the element.
+ * @param wide, load 64 bits if true, otherwise 32 bits.
*/
- virtual void OpPcRelDexCacheArrayLoad(const DexFile* dex_file, int offset, RegStorage r_dest);
+ virtual void OpPcRelDexCacheArrayLoad(const DexFile* dex_file, int offset, RegStorage r_dest,
+ bool wide);
// Routines that work for the generic case, but may be overriden by target.
/*
diff --git a/compiler/dex/quick/quick_cfi_test.cc b/compiler/dex/quick/quick_cfi_test.cc
index b3c7355..8694ebc 100644
--- a/compiler/dex/quick/quick_cfi_test.cc
+++ b/compiler/dex/quick/quick_cfi_test.cc
@@ -100,7 +100,8 @@ class QuickCFITest : public CFITest {
}
}
m2l->AdjustSpillMask();
- m2l->GenEntrySequence(nullptr, m2l->LocCReturnRef());
+ m2l->GenEntrySequence(nullptr, m2l->GetCompilationUnit()->target64 ?
+ m2l->LocCReturnWide() : m2l->LocCReturnRef());
m2l->GenExitSequence();
m2l->HandleSlowPaths();
m2l->AssembleLIR();
diff --git a/compiler/dex/quick/quick_cfi_test_expected.inc b/compiler/dex/quick/quick_cfi_test_expected.inc
index 48109d2..52d66a4 100644
--- a/compiler/dex/quick/quick_cfi_test_expected.inc
+++ b/compiler/dex/quick/quick_cfi_test_expected.inc
@@ -34,7 +34,7 @@ static constexpr uint8_t expected_cfi_kThumb2[] = {
static constexpr uint8_t expected_asm_kArm64[] = {
0xFF, 0x03, 0x01, 0xD1, 0xE8, 0xA7, 0x01, 0x6D, 0xF3, 0xD3, 0x02, 0xA9,
- 0xFE, 0x1F, 0x00, 0xF9, 0xE0, 0x03, 0x00, 0xB9, 0xE8, 0xA7, 0x41, 0x6D,
+ 0xFE, 0x1F, 0x00, 0xF9, 0xE0, 0x03, 0x00, 0xF9, 0xE8, 0xA7, 0x41, 0x6D,
0xF3, 0xD3, 0x42, 0xA9, 0xFE, 0x1F, 0x40, 0xF9, 0xFF, 0x03, 0x01, 0x91,
0xC0, 0x03, 0x5F, 0xD6,
};
@@ -54,7 +54,7 @@ static constexpr uint8_t expected_cfi_kArm64[] = {
// 0x0000000c: .cfi_offset: r20 at cfa-16
// 0x0000000c: str lr, [sp, #56]
// 0x00000010: .cfi_offset: r30 at cfa-8
-// 0x00000010: str w0, [sp]
+// 0x00000010: str x0, [sp]
// 0x00000014: .cfi_remember_state
// 0x00000014: ldp d8, d9, [sp, #24]
// 0x00000018: .cfi_restore_extended: r72
@@ -101,15 +101,15 @@ static constexpr uint8_t expected_cfi_kX86[] = {
static constexpr uint8_t expected_asm_kX86_64[] = {
0x48, 0x83, 0xEC, 0x38, 0x48, 0x89, 0x5C, 0x24, 0x28, 0x48, 0x89, 0x6C,
0x24, 0x30, 0xF2, 0x44, 0x0F, 0x11, 0x64, 0x24, 0x18, 0xF2, 0x44, 0x0F,
- 0x11, 0x6C, 0x24, 0x20, 0x48, 0x8B, 0xC7, 0x89, 0x3C, 0x24, 0x48, 0x8B,
- 0x5C, 0x24, 0x28, 0x48, 0x8B, 0x6C, 0x24, 0x30, 0xF2, 0x44, 0x0F, 0x10,
- 0x64, 0x24, 0x18, 0xF2, 0x44, 0x0F, 0x10, 0x6C, 0x24, 0x20, 0x48, 0x83,
- 0xC4, 0x38, 0xC3, 0x00,
+ 0x11, 0x6C, 0x24, 0x20, 0x48, 0x8B, 0xC7, 0x48, 0x89, 0x3C, 0x24, 0x48,
+ 0x8B, 0x5C, 0x24, 0x28, 0x48, 0x8B, 0x6C, 0x24, 0x30, 0xF2, 0x44, 0x0F,
+ 0x10, 0x64, 0x24, 0x18, 0xF2, 0x44, 0x0F, 0x10, 0x6C, 0x24, 0x20, 0x48,
+ 0x83, 0xC4, 0x38, 0xC3,
};
static constexpr uint8_t expected_cfi_kX86_64[] = {
0x44, 0x0E, 0x40, 0x45, 0x83, 0x06, 0x45, 0x86, 0x04, 0x47, 0x9D, 0x0A,
- 0x47, 0x9E, 0x08, 0x46, 0x0A, 0x45, 0xC3, 0x45, 0xC6, 0x47, 0xDD, 0x47,
- 0xDE, 0x44, 0x0E, 0x08, 0x42, 0x0B, 0x0E, 0x40,
+ 0x47, 0x9E, 0x08, 0x47, 0x0A, 0x45, 0xC3, 0x45, 0xC6, 0x47, 0xDD, 0x47,
+ 0xDE, 0x44, 0x0E, 0x08, 0x41, 0x0B, 0x0E, 0x40,
};
// 0x00000000: subq rsp, 56
// 0x00000004: .cfi_def_cfa_offset: 64
@@ -122,20 +122,19 @@ static constexpr uint8_t expected_cfi_kX86_64[] = {
// 0x00000015: movsd [rsp + 32], xmm13
// 0x0000001c: .cfi_offset: r30 at cfa-32
// 0x0000001c: movq rax, rdi
-// 0x0000001f: mov [rsp], edi
-// 0x00000022: .cfi_remember_state
-// 0x00000022: movq rbx, [rsp + 40]
-// 0x00000027: .cfi_restore: r3
-// 0x00000027: movq rbp, [rsp + 48]
-// 0x0000002c: .cfi_restore: r6
-// 0x0000002c: movsd xmm12, [rsp + 24]
-// 0x00000033: .cfi_restore: r29
-// 0x00000033: movsd xmm13, [rsp + 32]
-// 0x0000003a: .cfi_restore: r30
-// 0x0000003a: addq rsp, 56
-// 0x0000003e: .cfi_def_cfa_offset: 8
-// 0x0000003e: ret
-// 0x0000003f: addb al, al
+// 0x0000001f: movq [rsp], rdi
+// 0x00000023: .cfi_remember_state
+// 0x00000023: movq rbx, [rsp + 40]
+// 0x00000028: .cfi_restore: r3
+// 0x00000028: movq rbp, [rsp + 48]
+// 0x0000002d: .cfi_restore: r6
+// 0x0000002d: movsd xmm12, [rsp + 24]
+// 0x00000034: .cfi_restore: r29
+// 0x00000034: movsd xmm13, [rsp + 32]
+// 0x0000003b: .cfi_restore: r30
+// 0x0000003b: addq rsp, 56
+// 0x0000003f: .cfi_def_cfa_offset: 8
+// 0x0000003f: ret
// 0x00000040: .cfi_restore_state
// 0x00000040: .cfi_def_cfa_offset: 64
@@ -172,7 +171,7 @@ static constexpr uint8_t expected_cfi_kMips[] = {
// 0x00000028: .cfi_restore: r31
// 0x00000028: addiu r29, r29, 64
// 0x0000002c: .cfi_def_cfa_offset: 0
-// 0x0000002c: jalr r0, r31
+// 0x0000002c: jr r31
// 0x00000030: nop
// 0x00000034: .cfi_restore_state
// 0x00000034: .cfi_def_cfa_offset: 64
@@ -180,7 +179,7 @@ static constexpr uint8_t expected_cfi_kMips[] = {
static constexpr uint8_t expected_asm_kMips64[] = {
0xE8, 0xFF, 0xBD, 0x67, 0x10, 0x00, 0xB2, 0xFF, 0x08, 0x00, 0xB3, 0xFF,
0x00, 0x00, 0xBF, 0xFF, 0xD8, 0xFF, 0xBD, 0x67, 0x25, 0x10, 0x80, 0x00,
- 0x00, 0x00, 0xA4, 0xAF, 0x38, 0x00, 0xB2, 0xDF, 0x30, 0x00, 0xB3, 0xDF,
+ 0x00, 0x00, 0xA4, 0xFF, 0x38, 0x00, 0xB2, 0xDF, 0x30, 0x00, 0xB3, 0xDF,
0x28, 0x00, 0xBF, 0xDF, 0x40, 0x00, 0xBD, 0x67, 0x09, 0x00, 0xE0, 0x03,
0x00, 0x00, 0x00, 0x00,
};
@@ -200,7 +199,7 @@ static constexpr uint8_t expected_cfi_kMips64[] = {
// 0x00000010: daddiu r29, r29, -40
// 0x00000014: .cfi_def_cfa_offset: 64
// 0x00000014: or r2, r4, r0
-// 0x00000018: sw r4, +0(r29)
+// 0x00000018: sd r4, +0(r29)
// 0x0000001c: .cfi_remember_state
// 0x0000001c: ld r18, +56(r29)
// 0x00000020: .cfi_restore: r18
@@ -214,4 +213,3 @@ static constexpr uint8_t expected_cfi_kMips64[] = {
// 0x00000030: nop
// 0x00000034: .cfi_restore_state
// 0x00000034: .cfi_def_cfa_offset: 64
-
diff --git a/compiler/dex/quick/quick_compiler.cc b/compiler/dex/quick/quick_compiler.cc
index 7ca4382..58236e2 100644
--- a/compiler/dex/quick/quick_compiler.cc
+++ b/compiler/dex/quick/quick_compiler.cc
@@ -18,6 +18,7 @@
#include <cstdint>
+#include "art_method-inl.h"
#include "base/dumpable.h"
#include "base/logging.h"
#include "base/macros.h"
@@ -37,7 +38,6 @@
#include "elf_writer_quick.h"
#include "jni/quick/jni_compiler.h"
#include "mir_to_lir.h"
-#include "mirror/art_method-inl.h"
#include "mirror/object.h"
#include "runtime.h"
@@ -787,7 +787,7 @@ CompiledMethod* QuickCompiler::JniCompile(uint32_t access_flags,
return ArtQuickJniCompileMethod(GetCompilerDriver(), access_flags, method_idx, dex_file);
}
-uintptr_t QuickCompiler::GetEntryPointOf(mirror::ArtMethod* method) const {
+uintptr_t QuickCompiler::GetEntryPointOf(ArtMethod* method) const {
return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCodePtrSize(
InstructionSetPointerSize(GetCompilerDriver()->GetInstructionSet())));
}
diff --git a/compiler/dex/quick/quick_compiler.h b/compiler/dex/quick/quick_compiler.h
index 8d2c324..43dd578 100644
--- a/compiler/dex/quick/quick_compiler.h
+++ b/compiler/dex/quick/quick_compiler.h
@@ -49,7 +49,7 @@ class QuickCompiler : public Compiler {
uint32_t method_idx,
const DexFile& dex_file) const OVERRIDE;
- uintptr_t GetEntryPointOf(mirror::ArtMethod* method) const OVERRIDE
+ uintptr_t GetEntryPointOf(ArtMethod* method) const OVERRIDE
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static Mir2Lir* GetCodeGenerator(CompilationUnit* cu, void* compilation_unit);
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index 2495757..43167a1 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -18,13 +18,13 @@
#include "codegen_x86.h"
+#include "art_method.h"
#include "base/logging.h"
#include "dex/quick/dex_file_to_method_inliner_map.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "driver/compiler_driver.h"
#include "driver/compiler_options.h"
#include "gc/accounting/card_table.h"
-#include "mirror/art_method.h"
#include "mirror/object_array-inl.h"
#include "utils/dex_cache_arrays_layout-inl.h"
#include "x86_lir.h"
@@ -379,7 +379,8 @@ int X86Mir2Lir::X86NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
case 0: {
CHECK_EQ(cu->dex_file, target_method.dex_file);
size_t offset = cg->dex_cache_arrays_layout_.MethodOffset(target_method.dex_method_index);
- cg->OpPcRelDexCacheArrayLoad(cu->dex_file, offset, cg->TargetReg(kArg0, kRef));
+ cg->OpPcRelDexCacheArrayLoad(cu->dex_file, offset, cg->TargetReg(kArg0, kRef),
+ cu->target64);
break;
}
default:
@@ -394,18 +395,20 @@ int X86Mir2Lir::X86NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
break;
case 1: // Get method->dex_cache_resolved_methods_
cg->LoadRefDisp(arg0_ref,
- mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(),
+ ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(),
arg0_ref,
kNotVolatile);
break;
- case 2: // Grab target method*
+ case 2: {
+ // Grab target method*
CHECK_EQ(cu->dex_file, target_method.dex_file);
- cg->LoadRefDisp(arg0_ref,
- mirror::ObjectArray<mirror::Object>::OffsetOfElement(
- target_method.dex_method_index).Int32Value(),
- arg0_ref,
- kNotVolatile);
+ const size_t pointer_size = GetInstructionSetPointerSize(cu->instruction_set);
+ cg->LoadWordDisp(arg0_ref,
+ mirror::Array::DataOffset(pointer_size).Uint32Value() +
+ target_method.dex_method_index * pointer_size,
+ arg0_ref);
break;
+ }
default:
return -1;
}
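
// The hunk above stops treating the resolved-methods array as an ObjectArray and
// instead loads a raw method pointer at a pointer-size-scaled offset. A minimal
// sketch of that offset math; the 12-byte array header is an assumption for
// illustration, not the authoritative mirror::Array layout:
#include <cstddef>

constexpr size_t kAssumedArrayHeaderSize = 12;  // object header + length field (assumed)

// Stand-in for mirror::Array::DataOffset(pointer_size): the payload starts at the
// header rounded up to the element (pointer) size.
constexpr size_t DataOffset(size_t pointer_size) {
  return (kAssumedArrayHeaderSize + pointer_size - 1) / pointer_size * pointer_size;
}

// Byte offset of method `index`, as computed for the new LoadWordDisp call.
constexpr size_t MethodSlotOffset(size_t index, size_t pointer_size) {
  return DataOffset(pointer_size) + index * pointer_size;
}

static_assert(MethodSlotOffset(0, 8) == 16, "64-bit: header rounds up to 16");
static_assert(MethodSlotOffset(2, 4) == 20, "32-bit: 12-byte header + 2 * 4");
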
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 5a46520..11d9d4a 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -105,7 +105,8 @@ class X86Mir2Lir FINAL : public Mir2Lir {
void UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) OVERRIDE;
bool CanUseOpPcRelDexCacheArrayLoad() const OVERRIDE;
- void OpPcRelDexCacheArrayLoad(const DexFile* dex_file, int offset, RegStorage r_dest) OVERRIDE;
+ void OpPcRelDexCacheArrayLoad(const DexFile* dex_file, int offset, RegStorage r_dest, bool wide)
+ OVERRIDE;
void GenImplicitNullCheck(RegStorage reg, int opt_flags) OVERRIDE;
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index 9bbb5f8..d993d93 100755
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -18,11 +18,11 @@
#include "codegen_x86.h"
+#include "art_method.h"
#include "base/bit_utils.h"
#include "base/logging.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "dex/reg_storage_eq.h"
-#include "mirror/art_method.h"
#include "mirror/array-inl.h"
#include "x86_lir.h"
@@ -1410,16 +1410,18 @@ RegStorage X86Mir2Lir::GetPcAndAnchor(LIR** anchor, RegStorage r_tmp) {
}
}
-void X86Mir2Lir::OpPcRelDexCacheArrayLoad(const DexFile* dex_file, int offset,
- RegStorage r_dest) {
+void X86Mir2Lir::OpPcRelDexCacheArrayLoad(const DexFile* dex_file, int offset, RegStorage r_dest,
+ bool wide) {
if (cu_->target64) {
- LIR* mov = NewLIR3(kX86Mov32RM, r_dest.GetReg(), kRIPReg, kDummy32BitOffset);
+ LIR* mov = NewLIR3(wide ? kX86Mov64RM : kX86Mov32RM, r_dest.GetReg(), kRIPReg,
+ kDummy32BitOffset);
mov->flags.fixup = kFixupLabel;
mov->operands[3] = WrapPointer(dex_file);
mov->operands[4] = offset;
mov->target = mov; // Used for pc_insn_offset (not used by x86-64 relative patcher).
dex_cache_access_insns_.push_back(mov);
} else {
+ CHECK(!wide) << "Unsupported";
// Get the PC to a register and get the anchor. Use r_dest for the temp if needed.
LIR* anchor;
RegStorage r_pc = GetPcAndAnchor(&anchor, r_dest);
@@ -3022,20 +3024,20 @@ void X86Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx,
if (rl_method.location == kLocPhysReg) {
if (use_declaring_class) {
- LoadRefDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
+ LoadRefDisp(rl_method.reg, ArtMethod::DeclaringClassOffset().Int32Value(),
check_class, kNotVolatile);
} else {
- LoadRefDisp(rl_method.reg, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
+ LoadRefDisp(rl_method.reg, ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
check_class, kNotVolatile);
LoadRefDisp(check_class, offset_of_type, check_class, kNotVolatile);
}
} else {
LoadCurrMethodDirect(check_class);
if (use_declaring_class) {
- LoadRefDisp(check_class, mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
+ LoadRefDisp(check_class, ArtMethod::DeclaringClassOffset().Int32Value(),
check_class, kNotVolatile);
} else {
- LoadRefDisp(check_class, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
+ LoadRefDisp(check_class, ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
check_class, kNotVolatile);
LoadRefDisp(check_class, offset_of_type, check_class, kNotVolatile);
}
@@ -3059,7 +3061,7 @@ void X86Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx,
}
void X86Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_lhs, RegLocation rl_rhs, int flags) {
+ RegLocation rl_lhs, RegLocation rl_rhs, int flags) {
OpKind op = kOpBkpt;
bool is_div_rem = false;
bool unary = false;
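
// Context for the OpPcRelDexCacheArrayLoad hunk above: x86-64 method-array entries
// are 8-byte pointers, so the PC-relative load takes a `wide` flag selecting a
// 64-bit mov, while 32-bit x86 keeps the narrow load and rejects wide requests. A
// tiny restatement of the selection, with the opcode names taken from the LIR enum
// in the diff:
enum X86OpCode { kX86Mov32RM, kX86Mov64RM };

inline X86OpCode PcRelDexCacheLoadOp(bool target64, bool wide) {
  if (!target64) {
    // Mirrors the CHECK(!wide) << "Unsupported" on the 32-bit path.
    return kX86Mov32RM;
  }
  return wide ? kX86Mov64RM : kX86Mov32RM;
}
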
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 2f211da..c62cd47 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -21,6 +21,7 @@
#include <string>
#include "arch/instruction_set_features.h"
+#include "art_method.h"
#include "backend_x86.h"
#include "base/logging.h"
#include "dex/compiler_ir.h"
@@ -28,7 +29,6 @@
#include "dex/reg_storage_eq.h"
#include "driver/compiler_driver.h"
#include "mirror/array-inl.h"
-#include "mirror/art_method.h"
#include "mirror/string.h"
#include "oat.h"
#include "x86_lir.h"
@@ -744,6 +744,7 @@ void X86Mir2Lir::SpillCoreRegs() {
const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
for (int reg = 0; mask != 0u; mask >>= 1, reg++) {
if ((mask & 0x1) != 0u) {
+ DCHECK_NE(offset, 0) << "offset 0 should be for method";
RegStorage r_src = cu_->target64 ? RegStorage::Solo64(reg) : RegStorage::Solo32(reg);
StoreBaseDisp(rs_rSP, offset, r_src, size, kNotVolatile);
cfi_.RelOffset(DwarfCoreReg(cu_->target64, reg), offset);
@@ -1026,7 +1027,7 @@ LIR* X86Mir2Lir::GenCallInsn(const MirMethodLoweringInfo& method_info) {
call_insn = CallWithLinkerFixup(method_info.GetTargetMethod(), method_info.GetSharpType());
} else {
call_insn = OpMem(kOpBlx, TargetReg(kArg0, kRef),
- mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ ArtMethod::EntryPointFromQuickCompiledCodeOffset(
cu_->target64 ? 8 : 4).Int32Value());
}
} else {
@@ -1103,7 +1104,7 @@ void X86Mir2Lir::InstallLiteralPools() {
// PC-relative references to dex cache arrays.
for (LIR* p : dex_cache_access_insns_) {
- DCHECK(p->opcode == kX86Mov32RM);
+ DCHECK(p->opcode == kX86Mov32RM || p->opcode == kX86Mov64RM);
const DexFile* dex_file = UnwrapPointer<DexFile>(p->operands[3]);
uint32_t offset = p->operands[4];
// The offset to patch is the last 4 bytes of the instruction.
diff --git a/compiler/dex/quick/x86/x86_lir.h b/compiler/dex/quick/x86/x86_lir.h
index 57db015..d6a6a60 100644
--- a/compiler/dex/quick/x86/x86_lir.h
+++ b/compiler/dex/quick/x86/x86_lir.h
@@ -82,7 +82,7 @@ namespace art {
* | IN[ins-1] | {Note: resides in caller's frame}
* | . |
* | IN[0] |
- * | caller's Method* |
+ * | caller's ArtMethod* |
* +===========================+ {Note: start of callee's frame}
* | return address | {pushed by call}
* | spill region | {variable sized}
@@ -104,7 +104,7 @@ namespace art {
* | OUT[outs-2] |
* | . |
* | OUT[0] |
- * | StackReference<ArtMethod> | <<== sp w/ 16-byte alignment
+ * | ArtMethod* | <<== sp w/ 16-byte alignment
* +===========================+
*/
diff --git a/compiler/dex/type_inference.cc b/compiler/dex/type_inference.cc
index cd6467f..a0dfcbe 100644
--- a/compiler/dex/type_inference.cc
+++ b/compiler/dex/type_inference.cc
@@ -686,8 +686,8 @@ TypeInference::CheckCastData* TypeInference::InitializeCheckCastData(MIRGraph* m
void TypeInference::InitializeSRegs() {
std::fill_n(sregs_, num_sregs_, Type::Unknown());
- /* Treat ArtMethod* as a normal reference */
- sregs_[mir_graph_->GetMethodSReg()] = Type::NonArrayRefType();
+ /* Treat ArtMethod* specially since they are pointer sized */
+ sregs_[mir_graph_->GetMethodSReg()] = Type::ArtMethodType(cu_->target64);
// Initialize parameter SSA regs at method entry.
int32_t entry_param_s_reg = mir_graph_->GetFirstInVR();
diff --git a/compiler/dex/type_inference.h b/compiler/dex/type_inference.h
index 85f79af..adc3b54 100644
--- a/compiler/dex/type_inference.h
+++ b/compiler/dex/type_inference.h
@@ -81,6 +81,10 @@ class TypeInference : public DeletableArenaObject<kArenaAllocMisc> {
return Type(kFlagLowWord | kFlagNarrow | kFlagRef);
}
+ static Type ArtMethodType(bool wide) {
+ return Type(kFlagLowWord | kFlagRef | (wide ? kFlagWide : kFlagNarrow));
+ }
+
static Type ObjectArrayType() {
return Type(kFlagNarrow | kFlagRef | kFlagLowWord |
(1u << kBitArrayDepthStart) | kFlagArrayNarrow | kFlagArrayRef);
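
// With ArtMethod* pointer-sized, the method s-reg becomes wide on 64-bit targets
// instead of a narrow reference. A sketch of the flag composition performed by
// ArtMethodType above; the concrete flag bit values here are invented for
// illustration:
#include <cstdint>

constexpr uint32_t kFlagNarrow = 1u << 0;
constexpr uint32_t kFlagWide = 1u << 1;
constexpr uint32_t kFlagRef = 1u << 2;
constexpr uint32_t kFlagLowWord = 1u << 3;

constexpr uint32_t ArtMethodTypeFlags(bool wide) {
  return kFlagLowWord | kFlagRef | (wide ? kFlagWide : kFlagNarrow);
}

static_assert((ArtMethodTypeFlags(true) & kFlagWide) != 0, "64-bit: wide method sreg");
static_assert((ArtMethodTypeFlags(false) & kFlagNarrow) != 0, "32-bit: narrow method sreg");
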
diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc
index e788261..ac7a4a7 100644
--- a/compiler/dex/verified_method.cc
+++ b/compiler/dex/verified_method.cc
@@ -20,12 +20,12 @@
#include <memory>
#include <vector>
+#include "art_method-inl.h"
#include "base/logging.h"
#include "base/stl_util.h"
#include "dex_file.h"
#include "dex_instruction-inl.h"
#include "dex_instruction_utils.h"
-#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache-inl.h"
#include "mirror/object-inl.h"
@@ -212,7 +212,7 @@ bool VerifiedMethod::GenerateDequickenMap(verifier::MethodVerifier* method_verif
if (is_virtual_quick || is_range_quick) {
uint32_t dex_pc = inst->GetDexPc(insns);
verifier::RegisterLine* line = method_verifier->GetRegLine(dex_pc);
- mirror::ArtMethod* method =
+ ArtMethod* method =
method_verifier->GetQuickInvokedMethod(inst, line, is_range_quick, true);
if (method == nullptr) {
// It can be null if the line wasn't verified since it was unreachable.
@@ -284,20 +284,24 @@ void VerifiedMethod::GenerateDevirtMap(verifier::MethodVerifier* method_verifier
// We can't devirtualize abstract classes except on arrays of abstract classes.
continue;
}
- mirror::ArtMethod* abstract_method = method_verifier->GetDexCache()->GetResolvedMethod(
- is_range ? inst->VRegB_3rc() : inst->VRegB_35c());
+ auto* cl = Runtime::Current()->GetClassLinker();
+ size_t pointer_size = cl->GetImagePointerSize();
+ ArtMethod* abstract_method = method_verifier->GetDexCache()->GetResolvedMethod(
+ is_range ? inst->VRegB_3rc() : inst->VRegB_35c(), pointer_size);
if (abstract_method == nullptr) {
// If the method is not found in the cache this means that it was never found
// by ResolveMethodAndCheckAccess() called when verifying invoke_*.
continue;
}
// Find the concrete method.
- mirror::ArtMethod* concrete_method = nullptr;
+ ArtMethod* concrete_method = nullptr;
if (is_interface) {
- concrete_method = reg_type.GetClass()->FindVirtualMethodForInterface(abstract_method);
+ concrete_method = reg_type.GetClass()->FindVirtualMethodForInterface(
+ abstract_method, pointer_size);
}
if (is_virtual) {
- concrete_method = reg_type.GetClass()->FindVirtualMethodForVirtual(abstract_method);
+ concrete_method = reg_type.GetClass()->FindVirtualMethodForVirtual(
+ abstract_method, pointer_size);
}
if (concrete_method == nullptr || concrete_method->IsAbstract()) {
// In cases where concrete_method is not found, or is abstract, continue to the next invoke.
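
// The devirtualization logic above is unchanged in shape; only the lookups now take
// the image pointer size. Its skip/record decision, condensed with plain bools
// standing in for the real queries:
enum class Devirt { kSkip, kRecord };

inline Devirt DevirtDecision(bool abstract_method_resolved, bool concrete_method_found,
                             bool concrete_is_abstract) {
  if (!abstract_method_resolved) {
    return Devirt::kSkip;  // never resolved while verifying the invoke
  }
  if (!concrete_method_found || concrete_is_abstract) {
    return Devirt::kSkip;  // nothing concrete to dispatch to
  }
  return Devirt::kRecord;
}
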
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
index e54cbf6..b25e967 100644
--- a/compiler/driver/compiler_driver-inl.h
+++ b/compiler/driver/compiler_driver-inl.h
@@ -20,8 +20,9 @@
#include "compiler_driver.h"
#include "art_field-inl.h"
+#include "art_method-inl.h"
+#include "class_linker-inl.h"
#include "dex_compilation_unit.h"
-#include "mirror/art_method-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache-inl.h"
#include "scoped_thread_state_change.h"
@@ -133,7 +134,7 @@ inline bool CompilerDriver::CanAccessResolvedMember(mirror::Class* referrer_clas
ArtMember* member ATTRIBUTE_UNUSED,
mirror::DexCache* dex_cache ATTRIBUTE_UNUSED,
uint32_t field_idx ATTRIBUTE_UNUSED) {
- // Not defined for ArtMember values other than ArtField or mirror::ArtMethod.
+ // Not defined for ArtMember values other than ArtField or ArtMethod.
UNREACHABLE();
}
@@ -147,10 +148,10 @@ inline bool CompilerDriver::CanAccessResolvedMember<ArtField>(mirror::Class* ref
}
template <>
-inline bool CompilerDriver::CanAccessResolvedMember<mirror::ArtMethod>(
+inline bool CompilerDriver::CanAccessResolvedMember<ArtMethod>(
mirror::Class* referrer_class,
mirror::Class* access_to,
- mirror::ArtMethod* method,
+ ArtMethod* method,
mirror::DexCache* dex_cache,
uint32_t field_idx) {
return referrer_class->CanAccessResolvedMethod(access_to, method, dex_cache, field_idx);
@@ -217,7 +218,7 @@ inline std::pair<bool, bool> CompilerDriver::IsFastStaticField(
inline bool CompilerDriver::IsClassOfStaticMethodAvailableToReferrer(
mirror::DexCache* dex_cache, mirror::Class* referrer_class,
- mirror::ArtMethod* resolved_method, uint16_t method_idx, uint32_t* storage_index) {
+ ArtMethod* resolved_method, uint16_t method_idx, uint32_t* storage_index) {
std::pair<bool, bool> result = IsClassOfStaticMemberAvailableToReferrer(
dex_cache, referrer_class, resolved_method, method_idx, storage_index);
// Only the first member of `result` is meaningful, as there is no
@@ -239,15 +240,14 @@ inline bool CompilerDriver::IsStaticFieldsClassInitialized(mirror::Class* referr
return fields_class == referrer_class || fields_class->IsInitialized();
}
-inline mirror::ArtMethod* CompilerDriver::ResolveMethod(
+inline ArtMethod* CompilerDriver::ResolveMethod(
ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
uint32_t method_idx, InvokeType invoke_type, bool check_incompatible_class_change) {
DCHECK_EQ(dex_cache->GetDexFile(), mUnit->GetDexFile());
DCHECK_EQ(class_loader.Get(), soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
- mirror::ArtMethod* resolved_method = mUnit->GetClassLinker()->ResolveMethod(
- *mUnit->GetDexFile(), method_idx, dex_cache, class_loader, NullHandle<mirror::ArtMethod>(),
- invoke_type);
+ ArtMethod* resolved_method = mUnit->GetClassLinker()->ResolveMethod(
+ *mUnit->GetDexFile(), method_idx, dex_cache, class_loader, nullptr, invoke_type);
DCHECK_EQ(resolved_method == nullptr, soa.Self()->IsExceptionPending());
if (UNLIKELY(resolved_method == nullptr)) {
// Clean up any exception left by type resolution.
@@ -263,7 +263,7 @@ inline mirror::ArtMethod* CompilerDriver::ResolveMethod(
}
inline void CompilerDriver::GetResolvedMethodDexFileLocation(
- mirror::ArtMethod* resolved_method, const DexFile** declaring_dex_file,
+ ArtMethod* resolved_method, const DexFile** declaring_dex_file,
uint16_t* declaring_class_idx, uint16_t* declaring_method_idx) {
mirror::Class* declaring_class = resolved_method->GetDeclaringClass();
*declaring_dex_file = declaring_class->GetDexCache()->GetDexFile();
@@ -272,7 +272,7 @@ inline void CompilerDriver::GetResolvedMethodDexFileLocation(
}
inline uint16_t CompilerDriver::GetResolvedMethodVTableIndex(
- mirror::ArtMethod* resolved_method, InvokeType type) {
+ ArtMethod* resolved_method, InvokeType type) {
if (type == kVirtual || type == kSuper) {
return resolved_method->GetMethodIndex();
} else if (type == kInterface) {
@@ -285,7 +285,7 @@ inline uint16_t CompilerDriver::GetResolvedMethodVTableIndex(
inline int CompilerDriver::IsFastInvoke(
ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
- mirror::Class* referrer_class, mirror::ArtMethod* resolved_method, InvokeType* invoke_type,
+ mirror::Class* referrer_class, ArtMethod* resolved_method, InvokeType* invoke_type,
MethodReference* target_method, const MethodReference* devirt_target,
uintptr_t* direct_code, uintptr_t* direct_method) {
// Don't try to fast-path if we don't understand the caller's class.
@@ -305,10 +305,12 @@ inline int CompilerDriver::IsFastInvoke(
(*invoke_type == kVirtual) && (resolved_method->IsFinal() || methods_class->IsFinal());
// For invoke-super, ensure the vtable index will be correct to dispatch in the vtable of
// the super class.
+ const size_t pointer_size = InstructionSetPointerSize(GetInstructionSet());
bool can_sharpen_super_based_on_type = same_dex_file && (*invoke_type == kSuper) &&
(referrer_class != methods_class) && referrer_class->IsSubClass(methods_class) &&
resolved_method->GetMethodIndex() < methods_class->GetVTableLength() &&
- (methods_class->GetVTableEntry(resolved_method->GetMethodIndex()) == resolved_method) &&
+ (methods_class->GetVTableEntry(
+ resolved_method->GetMethodIndex(), pointer_size) == resolved_method) &&
!resolved_method->IsAbstract();
if (can_sharpen_virtual_based_on_type || can_sharpen_super_based_on_type) {
@@ -316,7 +318,8 @@ inline int CompilerDriver::IsFastInvoke(
// dex cache, check that this resolved method is where we expect it.
CHECK_EQ(target_method->dex_file, mUnit->GetDexFile());
DCHECK_EQ(dex_cache.Get(), mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile()));
- CHECK_EQ(referrer_class->GetDexCache()->GetResolvedMethod(target_method->dex_method_index),
+ CHECK_EQ(referrer_class->GetDexCache()->GetResolvedMethod(
+ target_method->dex_method_index, pointer_size),
resolved_method) << PrettyMethod(resolved_method);
int stats_flags = kFlagMethodResolved;
GetCodeAndMethodForDirectCall(/*out*/invoke_type,
@@ -336,21 +339,18 @@ inline int CompilerDriver::IsFastInvoke(
if ((*invoke_type == kVirtual || *invoke_type == kInterface) && devirt_target != nullptr) {
// Post-verification callback recorded a more precise invoke target based on its type info.
- mirror::ArtMethod* called_method;
+ ArtMethod* called_method;
ClassLinker* class_linker = mUnit->GetClassLinker();
if (LIKELY(devirt_target->dex_file == mUnit->GetDexFile())) {
- called_method = class_linker->ResolveMethod(*devirt_target->dex_file,
- devirt_target->dex_method_index, dex_cache,
- class_loader, NullHandle<mirror::ArtMethod>(),
- kVirtual);
+ called_method = class_linker->ResolveMethod(
+ *devirt_target->dex_file, devirt_target->dex_method_index, dex_cache, class_loader,
+ nullptr, kVirtual);
} else {
StackHandleScope<1> hs(soa.Self());
- Handle<mirror::DexCache> target_dex_cache(
- hs.NewHandle(class_linker->FindDexCache(*devirt_target->dex_file)));
- called_method = class_linker->ResolveMethod(*devirt_target->dex_file,
- devirt_target->dex_method_index,
- target_dex_cache, class_loader,
- NullHandle<mirror::ArtMethod>(), kVirtual);
+ auto target_dex_cache(hs.NewHandle(class_linker->FindDexCache(*devirt_target->dex_file)));
+ called_method = class_linker->ResolveMethod(
+ *devirt_target->dex_file, devirt_target->dex_method_index, target_dex_cache,
+ class_loader, nullptr, kVirtual);
}
CHECK(called_method != nullptr);
CHECK(!called_method->IsAbstract());
@@ -389,7 +389,7 @@ inline int CompilerDriver::IsFastInvoke(
}
inline bool CompilerDriver::IsMethodsClassInitialized(mirror::Class* referrer_class,
- mirror::ArtMethod* resolved_method) {
+ ArtMethod* resolved_method) {
if (!resolved_method->IsStatic()) {
return true;
}
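
// Restating the invoke-super sharpening guard from the IsFastInvoke hunk above: the
// call is only sharpened when the superclass vtable slot, now fetched with the
// image pointer size, still holds the resolved method. Plain bools stand in for
// the real queries:
inline bool CanSharpenSuperBasedOnType(bool same_dex_file, bool is_super,
                                       bool referrer_is_subclass, bool index_in_vtable,
                                       bool vtable_entry_matches, bool is_abstract) {
  return same_dex_file && is_super && referrer_is_subclass && index_in_vtable &&
         vtable_entry_matches && !is_abstract;
}
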
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 7cc5aae..e963c12 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -28,6 +28,7 @@
#endif
#include "art_field-inl.h"
+#include "art_method-inl.h"
#include "base/stl_util.h"
#include "base/time_utils.h"
#include "base/timing_logger.h"
@@ -50,8 +51,8 @@
#include "runtime.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap.h"
+#include "gc/space/image_space.h"
#include "gc/space/space.h"
-#include "mirror/art_method-inl.h"
#include "mirror/class_loader.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache-inl.h"
@@ -542,7 +543,7 @@ DexToDexCompilationLevel CompilerDriver::GetDexToDexCompilationlevel(
}
}
-void CompilerDriver::CompileOne(Thread* self, mirror::ArtMethod* method, TimingLogger* timings) {
+void CompilerDriver::CompileOne(Thread* self, ArtMethod* method, TimingLogger* timings) {
DCHECK(!Runtime::Current()->IsStarted());
jobject jclass_loader;
const DexFile* dex_file;
@@ -586,7 +587,7 @@ void CompilerDriver::CompileOne(Thread* self, mirror::ArtMethod* method, TimingL
self->TransitionFromSuspendedToRunnable();
}
-CompiledMethod* CompilerDriver::CompileMethod(Thread* self, mirror::ArtMethod* method) {
+CompiledMethod* CompilerDriver::CompileMethod(Thread* self, ArtMethod* method) {
const uint32_t method_idx = method->GetDexMethodIndex();
const uint32_t access_flags = method->GetAccessFlags();
const InvokeType invoke_type = method->GetInvokeType();
@@ -688,8 +689,8 @@ bool CompilerDriver::IsMethodToCompile(const MethodReference& method_ref) const
return methods_to_compile_->find(tmp.c_str()) != methods_to_compile_->end();
}
-static void ResolveExceptionsForMethod(MutableHandle<mirror::ArtMethod> method_handle,
- std::set<std::pair<uint16_t, const DexFile*>>& exceptions_to_resolve)
+static void ResolveExceptionsForMethod(
+ ArtMethod* method_handle, std::set<std::pair<uint16_t, const DexFile*>>& exceptions_to_resolve)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
const DexFile::CodeItem* code_item = method_handle->GetCodeItem();
if (code_item == nullptr) {
@@ -728,17 +729,14 @@ static void ResolveExceptionsForMethod(MutableHandle<mirror::ArtMethod> method_h
static bool ResolveCatchBlockExceptionsClassVisitor(mirror::Class* c, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- std::set<std::pair<uint16_t, const DexFile*>>* exceptions_to_resolve =
+ auto* exceptions_to_resolve =
reinterpret_cast<std::set<std::pair<uint16_t, const DexFile*>>*>(arg);
- StackHandleScope<1> hs(Thread::Current());
- MutableHandle<mirror::ArtMethod> method_handle(hs.NewHandle<mirror::ArtMethod>(nullptr));
- for (size_t i = 0; i < c->NumVirtualMethods(); ++i) {
- method_handle.Assign(c->GetVirtualMethod(i));
- ResolveExceptionsForMethod(method_handle, *exceptions_to_resolve);
+ const auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ for (auto& m : c->GetVirtualMethods(pointer_size)) {
+ ResolveExceptionsForMethod(&m, *exceptions_to_resolve);
}
- for (size_t i = 0; i < c->NumDirectMethods(); ++i) {
- method_handle.Assign(c->GetDirectMethod(i));
- ResolveExceptionsForMethod(method_handle, *exceptions_to_resolve);
+ for (auto& m : c->GetDirectMethods(pointer_size)) {
+ ResolveExceptionsForMethod(&m, *exceptions_to_resolve);
}
return true;
}
@@ -826,6 +824,7 @@ static void MaybeAddToImageClasses(Handle<mirror::Class> c,
// Make a copy of the handle so that we don't clobber it doing Assign.
MutableHandle<mirror::Class> klass(hs.NewHandle(c.Get()));
std::string temp;
+ const size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
while (!klass->IsObjectClass()) {
const char* descriptor = klass->GetDescriptor(&temp);
std::pair<std::unordered_set<std::string>::iterator, bool> result =
@@ -839,6 +838,12 @@ static void MaybeAddToImageClasses(Handle<mirror::Class> c,
MaybeAddToImageClasses(hs2.NewHandle(mirror::Class::GetDirectInterface(self, klass, i)),
image_classes);
}
+ for (auto& m : c->GetVirtualMethods(pointer_size)) {
+ if (m.IsMiranda() || (true)) {
+ StackHandleScope<1> hs2(self);
+ MaybeAddToImageClasses(hs2.NewHandle(m.GetDeclaringClass()), image_classes);
+ }
+ }
if (klass->IsArrayClass()) {
StackHandleScope<1> hs2(self);
MaybeAddToImageClasses(hs2.NewHandle(klass->GetComponentType()), image_classes);
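
// Note on the `m.IsMiranda() || (true)` guard above: as written it is
// unconditionally true, so every virtual method's declaring class is pulled into
// the image classes; the IsMiranda() check survives only as documentation of the
// case that motivated the change.
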
@@ -855,10 +860,7 @@ class ClinitImageUpdate {
Thread* self, ClassLinker* linker, std::string* error_msg) {
std::unique_ptr<ClinitImageUpdate> res(new ClinitImageUpdate(image_class_descriptors, self,
linker));
- if (res->art_method_class_ == nullptr) {
- *error_msg = "Could not find ArtMethod class.";
- return nullptr;
- } else if (res->dex_cache_class_ == nullptr) {
+ if (res->dex_cache_class_ == nullptr) {
*error_msg = "Could not find DexCache class.";
return nullptr;
}
@@ -903,8 +905,6 @@ class ClinitImageUpdate {
old_cause_ = self->StartAssertNoThreadSuspension("Boot image closure");
// Find the interesting classes.
- art_method_class_ = linker->LookupClass(self, "Ljava/lang/reflect/ArtMethod;",
- ComputeModifiedUtf8Hash("Ljava/lang/reflect/ArtMethod;"), nullptr);
dex_cache_class_ = linker->LookupClass(self, "Ljava/lang/DexCache;",
ComputeModifiedUtf8Hash("Ljava/lang/DexCache;"), nullptr);
@@ -922,7 +922,8 @@ class ClinitImageUpdate {
data->image_classes_.push_back(klass);
} else {
// Check whether it is initialized and has a clinit. They must be kept, too.
- if (klass->IsInitialized() && klass->FindClassInitializer() != nullptr) {
+ if (klass->IsInitialized() && klass->FindClassInitializer(
+ Runtime::Current()->GetClassLinker()->GetImagePointerSize()) != nullptr) {
data->image_classes_.push_back(klass);
}
}
@@ -950,9 +951,9 @@ class ClinitImageUpdate {
VisitClinitClassesObject(object->GetClass());
}
- // If it is not a dex cache or an ArtMethod, visit all references.
+ // If it is not a DexCache, visit all references.
mirror::Class* klass = object->GetClass();
- if (klass != art_method_class_ && klass != dex_cache_class_) {
+ if (klass != dex_cache_class_) {
object->VisitReferences<false /* visit class */>(*this, *this);
}
}
@@ -960,7 +961,6 @@ class ClinitImageUpdate {
mutable std::unordered_set<mirror::Object*> marked_objects_;
std::unordered_set<std::string>* const image_class_descriptors_;
std::vector<mirror::Class*> image_classes_;
- const mirror::Class* art_method_class_;
const mirror::Class* dex_cache_class_;
Thread* const self_;
const char* old_cause_;
@@ -1334,7 +1334,7 @@ bool CompilerDriver::ComputeStaticFieldInfo(uint32_t field_idx, const DexCompila
void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType* type, InvokeType sharp_type,
bool no_guarantee_of_dex_cache_entry,
const mirror::Class* referrer_class,
- mirror::ArtMethod* method,
+ ArtMethod* method,
int* stats_flags,
MethodReference* target_method,
uintptr_t* direct_code,
@@ -1347,6 +1347,8 @@ void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType* type, InvokeType
*direct_method = 0;
Runtime* const runtime = Runtime::Current();
gc::Heap* const heap = runtime->GetHeap();
+ auto* cl = runtime->GetClassLinker();
+ const auto pointer_size = cl->GetImagePointerSize();
bool use_dex_cache = GetCompilerOptions().GetCompilePic(); // Off by default
const bool compiling_boot = heap->IsCompilingBoot();
// TODO This is somewhat hacky. We should refactor all of this invoke codepath.
@@ -1375,7 +1377,7 @@ void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType* type, InvokeType
if (runtime->UseJit()) {
// If we are the JIT, then don't allow a direct call to the interpreter bridge since this will
// never be updated even after we compile the method.
- if (runtime->GetClassLinker()->IsQuickToInterpreterBridge(
+ if (cl->IsQuickToInterpreterBridge(
reinterpret_cast<const void*>(compiler_->GetEntryPointOf(method)))) {
use_dex_cache = true;
}
@@ -1389,8 +1391,7 @@ void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType* type, InvokeType
is_in_image = IsImageClass(method->GetDeclaringClassDescriptor());
} else {
is_in_image = instruction_set_ != kX86 && instruction_set_ != kX86_64 &&
- Runtime::Current()->GetHeap()->FindSpaceFromObject(method->GetDeclaringClass(),
- false)->IsImageSpace();
+ heap->FindSpaceFromObject(method->GetDeclaringClass(), false)->IsImageSpace();
}
if (!is_in_image) {
// We can only branch directly to Methods that are resolved in the DexCache.
@@ -1403,14 +1404,14 @@ void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType* type, InvokeType
bool must_use_direct_pointers = false;
mirror::DexCache* dex_cache = declaring_class->GetDexCache();
if (target_method->dex_file == dex_cache->GetDexFile() &&
- !(runtime->UseJit() && dex_cache->GetResolvedMethod(method->GetDexMethodIndex()) == nullptr)) {
+ !(runtime->UseJit() && dex_cache->GetResolvedMethod(
+ method->GetDexMethodIndex(), pointer_size) == nullptr)) {
target_method->dex_method_index = method->GetDexMethodIndex();
} else {
if (no_guarantee_of_dex_cache_entry) {
// See if the method is also declared in this dex cache.
- uint32_t dex_method_idx =
- method->FindDexMethodIndexInOtherDexFile(*target_method->dex_file,
- target_method->dex_method_index);
+ uint32_t dex_method_idx = method->FindDexMethodIndexInOtherDexFile(
+ *target_method->dex_file, target_method->dex_method_index);
if (dex_method_idx != DexFile::kDexNoIndex) {
target_method->dex_method_index = dex_method_idx;
} else {
@@ -1431,7 +1432,13 @@ void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType* type, InvokeType
*type = sharp_type;
}
} else {
- bool method_in_image = heap->FindSpaceFromObject(method, false)->IsImageSpace();
+ auto* image_space = heap->GetImageSpace();
+ bool method_in_image = false;
+ if (image_space != nullptr) {
+ const auto& method_section = image_space->GetImageHeader().GetMethodsSection();
+ method_in_image = method_section.Contains(
+ reinterpret_cast<uint8_t*>(method) - image_space->Begin());
+ }
if (method_in_image || compiling_boot || runtime->UseJit()) {
// We know we must be able to get to the method in the image, so use that pointer.
// In the case where we are the JIT, we can always use direct pointers since we know where
@@ -1469,21 +1476,16 @@ bool CompilerDriver::ComputeInvokeInfo(const DexCompilationUnit* mUnit, const ui
int stats_flags = 0;
ScopedObjectAccess soa(Thread::Current());
// Try to resolve the method and compiling method's class.
- mirror::ArtMethod* resolved_method;
- mirror::Class* referrer_class;
StackHandleScope<3> hs(soa.Self());
Handle<mirror::DexCache> dex_cache(
hs.NewHandle(mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile())));
Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader())));
- {
- uint32_t method_idx = target_method->dex_method_index;
- Handle<mirror::ArtMethod> resolved_method_handle(hs.NewHandle(
- ResolveMethod(soa, dex_cache, class_loader, mUnit, method_idx, orig_invoke_type)));
- referrer_class = (resolved_method_handle.Get() != nullptr)
- ? ResolveCompilingMethodsClass(soa, dex_cache, class_loader, mUnit) : nullptr;
- resolved_method = resolved_method_handle.Get();
- }
+ uint32_t method_idx = target_method->dex_method_index;
+ ArtMethod* resolved_method = ResolveMethod(
+ soa, dex_cache, class_loader, mUnit, method_idx, orig_invoke_type);
+ auto h_referrer_class = hs.NewHandle(resolved_method != nullptr ?
+ ResolveCompilingMethodsClass(soa, dex_cache, class_loader, mUnit) : nullptr);
bool result = false;
if (resolved_method != nullptr) {
*vtable_idx = GetResolvedMethodVTableIndex(resolved_method, orig_invoke_type);
@@ -1492,13 +1494,13 @@ bool CompilerDriver::ComputeInvokeInfo(const DexCompilationUnit* mUnit, const ui
const MethodReference* devirt_target = mUnit->GetVerifiedMethod()->GetDevirtTarget(dex_pc);
stats_flags = IsFastInvoke(
- soa, dex_cache, class_loader, mUnit, referrer_class, resolved_method,
+ soa, dex_cache, class_loader, mUnit, h_referrer_class.Get(), resolved_method,
invoke_type, target_method, devirt_target, direct_code, direct_method);
result = stats_flags != 0;
} else {
// Devirtualization not enabled. Inline IsFastInvoke(), dropping the devirtualization parts.
- if (UNLIKELY(referrer_class == nullptr) ||
- UNLIKELY(!referrer_class->CanAccessResolvedMethod(resolved_method->GetDeclaringClass(),
+ if (UNLIKELY(h_referrer_class.Get() == nullptr) ||
+ UNLIKELY(!h_referrer_class->CanAccessResolvedMethod(resolved_method->GetDeclaringClass(),
resolved_method, dex_cache.Get(),
target_method->dex_method_index)) ||
*invoke_type == kSuper) {
@@ -1506,8 +1508,9 @@ bool CompilerDriver::ComputeInvokeInfo(const DexCompilationUnit* mUnit, const ui
} else {
// Sharpening failed so generate a regular resolved method dispatch.
stats_flags = kFlagMethodResolved;
- GetCodeAndMethodForDirectCall(invoke_type, *invoke_type, false, referrer_class, resolved_method,
- &stats_flags, target_method, direct_code, direct_method);
+ GetCodeAndMethodForDirectCall(
+ invoke_type, *invoke_type, false, h_referrer_class.Get(), resolved_method, &stats_flags,
+ target_method, direct_code, direct_method);
result = true;
}
}
@@ -1773,20 +1776,18 @@ static void ResolveClassFieldsAndMethods(const ParallelCompilationManager* manag
}
if (resolve_fields_and_methods) {
while (it.HasNextDirectMethod()) {
- mirror::ArtMethod* method = class_linker->ResolveMethod(dex_file, it.GetMemberIndex(),
- dex_cache, class_loader,
- NullHandle<mirror::ArtMethod>(),
- it.GetMethodInvokeType(class_def));
+ ArtMethod* method = class_linker->ResolveMethod(
+ dex_file, it.GetMemberIndex(), dex_cache, class_loader, nullptr,
+ it.GetMethodInvokeType(class_def));
if (method == nullptr) {
CheckAndClearResolveException(soa.Self());
}
it.Next();
}
while (it.HasNextVirtualMethod()) {
- mirror::ArtMethod* method = class_linker->ResolveMethod(dex_file, it.GetMemberIndex(),
- dex_cache, class_loader,
- NullHandle<mirror::ArtMethod>(),
- it.GetMethodInvokeType(class_def));
+ ArtMethod* method = class_linker->ResolveMethod(
+ dex_file, it.GetMemberIndex(), dex_cache, class_loader, nullptr,
+ it.GetMethodInvokeType(class_def));
if (method == nullptr) {
CheckAndClearResolveException(soa.Self());
}
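
// Since methods are no longer heap objects, GetCodeAndMethodForDirectCall above
// answers "is this method in the boot image?" with a range check against the image
// header's methods section instead of FindSpaceFromObject. A minimal model of that
// containment test; the Section type is a stand-in, not the real ImageSection API:
#include <cstdint>

struct Section {
  uint64_t offset;
  uint64_t size;
  // Unsigned wrap-around makes this a single-comparison range check.
  bool Contains(uint64_t o) const { return o - offset < size; }
};

inline bool MethodInImage(const Section& methods, const uint8_t* method,
                          const uint8_t* image_begin) {
  return method >= image_begin &&
         methods.Contains(static_cast<uint64_t>(method - image_begin));
}
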
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 2cc2409..68c905e 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -116,11 +116,11 @@ class CompilerDriver {
TimingLogger* timings)
LOCKS_EXCLUDED(Locks::mutator_lock_);
- CompiledMethod* CompileMethod(Thread* self, mirror::ArtMethod*)
+ CompiledMethod* CompileMethod(Thread* self, ArtMethod*)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) WARN_UNUSED;
// Compile a single Method.
- void CompileOne(Thread* self, mirror::ArtMethod* method, TimingLogger* timings)
+ void CompileOne(Thread* self, ArtMethod* method, TimingLogger* timings)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
VerificationResults* GetVerificationResults() const {
@@ -288,7 +288,7 @@ class CompilerDriver {
// return DexFile::kDexNoIndex through `storage_index`.
bool IsClassOfStaticMethodAvailableToReferrer(mirror::DexCache* dex_cache,
mirror::Class* referrer_class,
- mirror::ArtMethod* resolved_method,
+ ArtMethod* resolved_method,
uint16_t method_idx,
uint32_t* storage_index)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -303,7 +303,7 @@ class CompilerDriver {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Resolve a method. Returns null on failure, including incompatible class change.
- mirror::ArtMethod* ResolveMethod(
+ ArtMethod* ResolveMethod(
ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
uint32_t method_idx, InvokeType invoke_type, bool check_incompatible_class_change = true)
@@ -311,13 +311,13 @@ class CompilerDriver {
// Get declaration location of a resolved field.
void GetResolvedMethodDexFileLocation(
- mirror::ArtMethod* resolved_method, const DexFile** declaring_dex_file,
+ ArtMethod* resolved_method, const DexFile** declaring_dex_file,
uint16_t* declaring_class_idx, uint16_t* declaring_method_idx)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Get the index in the vtable of the method.
uint16_t GetResolvedMethodVTableIndex(
- mirror::ArtMethod* resolved_method, InvokeType type)
+ ArtMethod* resolved_method, InvokeType type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Can we fast-path an INVOKE? If no, returns 0. If yes, returns a non-zero opaque flags value
@@ -325,7 +325,7 @@ class CompilerDriver {
int IsFastInvoke(
ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
- mirror::Class* referrer_class, mirror::ArtMethod* resolved_method, InvokeType* invoke_type,
+ mirror::Class* referrer_class, ArtMethod* resolved_method, InvokeType* invoke_type,
MethodReference* target_method, const MethodReference* devirt_target,
uintptr_t* direct_code, uintptr_t* direct_method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -333,7 +333,7 @@ class CompilerDriver {
// Is method's class initialized for an invoke?
// For static invokes to determine whether we need to consider potential call to <clinit>().
// For non-static invokes, assuming a non-null reference, the class is always initialized.
- bool IsMethodsClassInitialized(mirror::Class* referrer_class, mirror::ArtMethod* resolved_method)
+ bool IsMethodsClassInitialized(mirror::Class* referrer_class, ArtMethod* resolved_method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Get the layout of dex cache arrays for a dex file. Returns invalid layout if the
@@ -526,7 +526,7 @@ class CompilerDriver {
InvokeType sharp_type,
bool no_guarantee_of_dex_cache_entry,
const mirror::Class* referrer_class,
- mirror::ArtMethod* method,
+ ArtMethod* method,
/*out*/int* stats_flags,
MethodReference* target_method,
uintptr_t* direct_code, uintptr_t* direct_method)
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index 5085f32..ba03f5a 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -20,11 +20,11 @@
#include <stdio.h>
#include <memory>
+#include "art_method-inl.h"
#include "class_linker-inl.h"
#include "common_compiler_test.h"
#include "dex_file.h"
#include "gc/heap.h"
-#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache-inl.h"
@@ -85,11 +85,12 @@ class CompilerDriverTest : public CommonCompilerTest {
hs.NewHandle(soa.Decode<mirror::ClassLoader*>(class_loader)));
mirror::Class* c = class_linker->FindClass(soa.Self(), descriptor, loader);
CHECK(c != nullptr);
- for (size_t j = 0; j < c->NumDirectMethods(); j++) {
- MakeExecutable(c->GetDirectMethod(j));
+ const auto pointer_size = class_linker->GetImagePointerSize();
+ for (auto& m : c->GetDirectMethods(pointer_size)) {
+ MakeExecutable(&m);
}
- for (size_t j = 0; j < c->NumVirtualMethods(); j++) {
- MakeExecutable(c->GetVirtualMethod(j));
+ for (auto& m : c->GetVirtualMethods(pointer_size)) {
+ MakeExecutable(&m);
}
}
}
@@ -120,8 +121,10 @@ TEST_F(CompilerDriverTest, DISABLED_LARGE_CompileDexLibCore) {
<< " " << dex.GetTypeDescriptor(dex.GetTypeId(i));
}
EXPECT_EQ(dex.NumMethodIds(), dex_cache->NumResolvedMethods());
+ auto* cl = Runtime::Current()->GetClassLinker();
+ auto pointer_size = cl->GetImagePointerSize();
for (size_t i = 0; i < dex_cache->NumResolvedMethods(); i++) {
- mirror::ArtMethod* method = dex_cache->GetResolvedMethod(i);
+ ArtMethod* method = dex_cache->GetResolvedMethod(i, pointer_size);
EXPECT_TRUE(method != nullptr) << "method_idx=" << i
<< " " << dex.GetMethodDeclaringClassDescriptor(dex.GetMethodId(i))
<< " " << dex.GetMethodName(dex.GetMethodId(i));
@@ -131,7 +134,7 @@ TEST_F(CompilerDriverTest, DISABLED_LARGE_CompileDexLibCore) {
}
EXPECT_EQ(dex.NumFieldIds(), dex_cache->NumResolvedFields());
for (size_t i = 0; i < dex_cache->NumResolvedFields(); i++) {
- ArtField* field = Runtime::Current()->GetClassLinker()->GetResolvedField(i, dex_cache);
+ ArtField* field = cl->GetResolvedField(i, dex_cache);
EXPECT_TRUE(field != nullptr) << "field_idx=" << i
<< " " << dex.GetFieldDeclaringClassDescriptor(dex.GetFieldId(i))
<< " " << dex.GetFieldName(dex.GetFieldId(i));
@@ -157,12 +160,15 @@ TEST_F(CompilerDriverTest, AbstractMethodErrorStub) {
// Create a jobj_ of ConcreteClass, NOT AbstractClass.
jclass c_class = env_->FindClass("ConcreteClass");
+
jmethodID constructor = env_->GetMethodID(c_class, "<init>", "()V");
+
jobject jobj_ = env_->NewObject(c_class, constructor);
ASSERT_TRUE(jobj_ != nullptr);
// Force non-virtual call to AbstractClass foo, will throw AbstractMethodError exception.
env_->CallNonvirtualVoidMethod(jobj_, class_, mid_);
+
EXPECT_EQ(env_->ExceptionCheck(), JNI_TRUE);
jthrowable exception = env_->ExceptionOccurred();
env_->ExceptionClear();
@@ -212,11 +218,10 @@ TEST_F(CompilerDriverMethodsTest, Selection) {
std::unique_ptr<std::unordered_set<std::string>> expected(GetCompiledMethods());
- for (int32_t i = 0; static_cast<uint32_t>(i) < klass->NumDirectMethods(); i++) {
- mirror::ArtMethod* m = klass->GetDirectMethod(i);
- std::string name = PrettyMethod(m, true);
- const void* code =
- m->GetEntryPointFromQuickCompiledCodePtrSize(InstructionSetPointerSize(kRuntimeISA));
+ const auto pointer_size = class_linker->GetImagePointerSize();
+ for (auto& m : klass->GetDirectMethods(pointer_size)) {
+ std::string name = PrettyMethod(&m, true);
+ const void* code = m.GetEntryPointFromQuickCompiledCodePtrSize(pointer_size);
ASSERT_NE(code, nullptr);
if (expected->find(name) != expected->end()) {
expected->erase(name);
diff --git a/compiler/elf_writer.cc b/compiler/elf_writer.cc
index f75638d..4219d97 100644
--- a/compiler/elf_writer.cc
+++ b/compiler/elf_writer.cc
@@ -16,6 +16,7 @@
#include "elf_writer.h"
+#include "art_method-inl.h"
#include "base/unix_file/fd_file.h"
#include "class_linker.h"
#include "dex_file-inl.h"
@@ -23,7 +24,6 @@
#include "driver/compiler_driver.h"
#include "elf_file.h"
#include "invoke_type.h"
-#include "mirror/art_method-inl.h"
#include "mirror/object-inl.h"
#include "oat.h"
#include "scoped_thread_state_change.h"
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index eaf3489..772cc80 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -105,14 +105,16 @@ TEST_F(ImageTest, WriteRead) {
<< oat_file.GetFilename();
}
+ uint64_t image_file_size;
{
std::unique_ptr<File> file(OS::OpenFileForReading(image_file.GetFilename().c_str()));
ASSERT_TRUE(file.get() != nullptr);
ImageHeader image_header;
ASSERT_EQ(file->ReadFully(&image_header, sizeof(image_header)), true);
ASSERT_TRUE(image_header.IsValid());
- ASSERT_GE(image_header.GetImageBitmapOffset(), sizeof(image_header));
- ASSERT_NE(0U, image_header.GetImageBitmapSize());
+ const auto& bitmap_section = image_header.GetImageSection(ImageHeader::kSectionImageBitmap);
+ ASSERT_GE(bitmap_section.Offset(), sizeof(image_header));
+ ASSERT_NE(0U, bitmap_section.Size());
gc::Heap* heap = Runtime::Current()->GetHeap();
ASSERT_TRUE(!heap->GetContinuousSpaces().empty());
@@ -120,7 +122,8 @@ TEST_F(ImageTest, WriteRead) {
ASSERT_FALSE(space->IsImageSpace());
ASSERT_TRUE(space != nullptr);
ASSERT_TRUE(space->IsMallocSpace());
- ASSERT_GE(sizeof(image_header) + space->Size(), static_cast<size_t>(file->GetLength()));
+
+ image_file_size = file->GetLength();
}
ASSERT_TRUE(compiler_driver_->GetImageClasses() != nullptr);
@@ -166,6 +169,9 @@ TEST_F(ImageTest, WriteRead) {
ASSERT_TRUE(heap->GetNonMovingSpace()->IsMallocSpace());
gc::space::ImageSpace* image_space = heap->GetImageSpace();
+ ASSERT_TRUE(image_space != nullptr);
+ ASSERT_LE(image_space->Size(), image_file_size);
+
image_space->VerifyImageAllocations();
uint8_t* image_begin = image_space->Begin();
uint8_t* image_end = image_space->End();
@@ -195,25 +201,23 @@ TEST_F(ImageTest, WriteRead) {
TEST_F(ImageTest, ImageHeaderIsValid) {
uint32_t image_begin = ART_BASE_ADDRESS;
uint32_t image_size_ = 16 * KB;
- uint32_t image_bitmap_offset = 0;
- uint32_t image_bitmap_size = 0;
uint32_t image_roots = ART_BASE_ADDRESS + (1 * KB);
uint32_t oat_checksum = 0;
uint32_t oat_file_begin = ART_BASE_ADDRESS + (4 * KB); // page aligned
uint32_t oat_data_begin = ART_BASE_ADDRESS + (8 * KB); // page aligned
uint32_t oat_data_end = ART_BASE_ADDRESS + (9 * KB);
uint32_t oat_file_end = ART_BASE_ADDRESS + (10 * KB);
+ ImageSection sections[ImageHeader::kSectionCount];
ImageHeader image_header(image_begin,
image_size_,
- 0u, 0u,
- image_bitmap_offset,
- image_bitmap_size,
+ sections,
image_roots,
oat_checksum,
oat_file_begin,
oat_data_begin,
oat_data_end,
oat_file_end,
+ sizeof(void*),
/*compile_pic*/false);
ASSERT_TRUE(image_header.IsValid());
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 195949b..dd62d94 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -23,6 +23,7 @@
#include <vector>
#include "art_field-inl.h"
+#include "art_method-inl.h"
#include "base/logging.h"
#include "base/unix_file/fd_file.h"
#include "class_linker-inl.h"
@@ -43,11 +44,12 @@
#include "intern_table.h"
#include "linear_alloc.h"
#include "lock_word.h"
-#include "mirror/art_method-inl.h"
+#include "mirror/abstract_method.h"
#include "mirror/array-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache-inl.h"
+#include "mirror/method.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
@@ -58,10 +60,8 @@
#include "handle_scope-inl.h"
#include "utils/dex_cache_arrays_layout-inl.h"
-using ::art::mirror::ArtMethod;
using ::art::mirror::Class;
using ::art::mirror::DexCache;
-using ::art::mirror::EntryPointFromInterpreter;
using ::art::mirror::Object;
using ::art::mirror::ObjectArray;
using ::art::mirror::String;
@@ -169,10 +169,11 @@ bool ImageWriter::Write(const std::string& image_filename,
ElfWriter::GetOatElfInformation(oat_file.get(), &oat_loaded_size, &oat_data_offset);
Thread::Current()->TransitionFromSuspendedToRunnable();
+
CreateHeader(oat_loaded_size, oat_data_offset);
+ CopyAndFixupNativeData();
// TODO: heap validation can't handle these fix up passes.
Runtime::Current()->GetHeap()->DisableObjectValidation();
- CopyAndFixupNativeData();
CopyAndFixupObjects();
Thread::Current()->TransitionFromRunnableToSuspended(kNative);
@@ -195,9 +196,8 @@ bool ImageWriter::Write(const std::string& image_filename,
return EXIT_FAILURE;
}
- // Write out the image + fields.
- const auto write_count = image_header->GetImageSize() + image_header->GetArtFieldsSize();
- CHECK_EQ(image_end_, image_header->GetImageSize());
+ // Write out the image + fields + methods.
+ const auto write_count = image_header->GetImageSize();
if (!image_file->WriteFully(image_->Begin(), write_count)) {
PLOG(ERROR) << "Failed to write image file " << image_filename;
image_file->Erase();
@@ -205,17 +205,16 @@ bool ImageWriter::Write(const std::string& image_filename,
}
// Write out the image bitmap at the page aligned start of the image end.
- CHECK_ALIGNED(image_header->GetImageBitmapOffset(), kPageSize);
+ const auto& bitmap_section = image_header->GetImageSection(ImageHeader::kSectionImageBitmap);
+ CHECK_ALIGNED(bitmap_section.Offset(), kPageSize);
if (!image_file->Write(reinterpret_cast<char*>(image_bitmap_->Begin()),
- image_header->GetImageBitmapSize(),
- image_header->GetImageBitmapOffset())) {
+ bitmap_section.Size(), bitmap_section.Offset())) {
PLOG(ERROR) << "Failed to write image file " << image_filename;
image_file->Erase();
return false;
}
- CHECK_EQ(image_header->GetImageBitmapOffset() + image_header->GetImageBitmapSize(),
- static_cast<size_t>(image_file->GetLength()));
+ CHECK_EQ(bitmap_section.End(), static_cast<size_t>(image_file->GetLength()));
if (image_file->FlushCloseOrErase() != 0) {
PLOG(ERROR) << "Failed to flush and close image file " << image_filename;
return false;
@@ -245,9 +244,16 @@ void ImageWriter::SetImageOffset(mirror::Object* object,
}
// The object is already deflated from when we set the bin slot. Just overwrite the lock word.
object->SetLockWord(LockWord::FromForwardingAddress(offset), false);
+ DCHECK_EQ(object->GetLockWord(false).ReadBarrierState(), 0u);
DCHECK(IsImageOffsetAssigned(object));
}
+void ImageWriter::UpdateImageOffset(mirror::Object* obj, uintptr_t offset) {
+ DCHECK(IsImageOffsetAssigned(obj)) << obj << " " << offset;
+ obj->SetLockWord(LockWord::FromForwardingAddress(offset), false);
+ DCHECK_EQ(obj->GetLockWord(false).ReadBarrierState(), 0u);
+}
+
void ImageWriter::AssignImageOffset(mirror::Object* object, ImageWriter::BinSlot bin_slot) {
DCHECK(object != nullptr);
DCHECK_NE(image_objects_offset_begin_, 0u);
@@ -304,6 +310,7 @@ void ImageWriter::SetImageBinSlot(mirror::Object* object, BinSlot bin_slot) {
}
object->SetLockWord(LockWord::FromForwardingAddress(static_cast<uint32_t>(bin_slot)),
false);
+ DCHECK_EQ(object->GetLockWord(false).ReadBarrierState(), 0u);
DCHECK(IsImageBinSlotAssigned(object));
}
@@ -324,16 +331,18 @@ void ImageWriter::PrepareDexCacheArraySlots() {
auto strings_size = layout.StringsSize(dex_file->NumStringIds());
dex_cache_array_indexes_.Put(
dex_cache->GetResolvedTypes(),
- DexCacheArrayLocation {size + layout.TypesOffset(), types_size});
+ DexCacheArrayLocation {size + layout.TypesOffset(), types_size, kBinRegular});
dex_cache_array_indexes_.Put(
dex_cache->GetResolvedMethods(),
- DexCacheArrayLocation {size + layout.MethodsOffset(), methods_size});
+ DexCacheArrayLocation {size + layout.MethodsOffset(), methods_size, kBinArtMethodClean});
+ AddMethodPointerArray(dex_cache->GetResolvedMethods());
dex_cache_array_indexes_.Put(
dex_cache->GetResolvedFields(),
- DexCacheArrayLocation {size + layout.FieldsOffset(), fields_size});
+ DexCacheArrayLocation {size + layout.FieldsOffset(), fields_size, kBinArtField});
+ pointer_arrays_.emplace(dex_cache->GetResolvedFields(), kBinArtField);
dex_cache_array_indexes_.Put(
dex_cache->GetStrings(),
- DexCacheArrayLocation {size + layout.StringsOffset(), strings_size});
+ DexCacheArrayLocation {size + layout.StringsOffset(), strings_size, kBinRegular});
size += layout.Size();
CHECK_EQ(layout.Size(), types_size + methods_size + fields_size + strings_size);
}
@@ -342,6 +351,23 @@ void ImageWriter::PrepareDexCacheArraySlots() {
bin_slot_sizes_[kBinDexCacheArray] = size;
}
+void ImageWriter::AddMethodPointerArray(mirror::PointerArray* arr) {
+ DCHECK(arr != nullptr);
+ if (kIsDebugBuild) {
+ for (size_t i = 0, len = arr->GetLength(); i < len; i++) {
+ auto* method = arr->GetElementPtrSize<ArtMethod*>(i, target_ptr_size_);
+ if (method != nullptr && !method->IsRuntimeMethod()) {
+ auto* klass = method->GetDeclaringClass();
+ CHECK(klass == nullptr || IsImageClass(klass)) << PrettyClass(klass)
+ << " should be an image class";
+ }
+ }
+ }
+ // kBinArtMethodClean picked arbitrarily, just required to differentiate between ArtFields and
+ // ArtMethods.
+ pointer_arrays_.emplace(arr, kBinArtMethodClean);
+}
+
void ImageWriter::AssignImageBinSlot(mirror::Object* object) {
DCHECK(object != nullptr);
size_t object_size = object->SizeOf();
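
// AddMethodPointerArray above records every native pointer array (resolved-methods
// arrays, vtables, interface method arrays) together with the bin its elements
// relocate into, so later fixup can tell ArtField slots from ArtMethod slots. A
// simplified model of that bookkeeping (types are stand-ins):
#include <unordered_map>

enum Bin { kBinArtField, kBinArtMethodClean, kBinArtMethodDirty };

struct PointerArrayTable {
  std::unordered_map<const void*, Bin> bins;

  void Add(const void* array, Bin bin) {
    // As in the diff, clean vs. dirty is irrelevant for lookup; the bin only
    // needs to distinguish fields from methods.
    bins.emplace(array, bin);
  }
};
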
@@ -393,6 +419,20 @@ void ImageWriter::AssignImageBinSlot(mirror::Object* object) {
bin = kBinClassVerified;
mirror::Class* klass = object->AsClass();
+ // Add non-embedded vtable to the pointer array table if there is one.
+ auto* vtable = klass->GetVTable();
+ if (vtable != nullptr) {
+ AddMethodPointerArray(vtable);
+ }
+ auto* iftable = klass->GetIfTable();
+ if (iftable != nullptr) {
+ for (int32_t i = 0; i < klass->GetIfTableCount(); ++i) {
+ if (iftable->GetMethodArrayCount(i) > 0) {
+ AddMethodPointerArray(iftable->GetMethodArray(i));
+ }
+ }
+ }
+
if (klass->GetStatus() == Class::kStatusInitialized) {
bin = kBinClassInitialized;
@@ -417,26 +457,11 @@ void ImageWriter::AssignImageBinSlot(mirror::Object* object) {
}
}
}
- } else if (object->IsArtMethod<kVerifyNone>()) {
- mirror::ArtMethod* art_method = down_cast<ArtMethod*>(object);
- if (art_method->IsNative()) {
- bin = kBinArtMethodNative;
- } else {
- mirror::Class* declaring_class = art_method->GetDeclaringClass();
- if (declaring_class->GetStatus() != Class::kStatusInitialized) {
- bin = kBinArtMethodNotInitialized;
- } else {
- // This is highly unlikely to dirty since there's no entry points to mutate.
- bin = kBinArtMethodsManagedInitialized;
- }
- }
} else if (object->GetClass<kVerifyNone>()->IsStringClass()) {
bin = kBinString; // Strings are almost always immutable (except for object header).
} else if (object->IsArrayInstance()) {
mirror::Class* klass = object->GetClass<kVerifyNone>();
- auto* component_type = klass->GetComponentType();
- if (!component_type->IsPrimitive() || component_type->IsPrimitiveInt() ||
- component_type->IsPrimitiveLong()) {
+ if (klass->IsObjectArrayClass() || klass->IsIntArrayClass() || klass->IsLongArrayClass()) {
auto it = dex_cache_array_indexes_.find(object);
if (it != dex_cache_array_indexes_.end()) {
bin = kBinDexCacheArray;
@@ -451,6 +476,7 @@ void ImageWriter::AssignImageBinSlot(mirror::Object* object) {
size_t offset_delta = RoundUp(object_size, kObjectAlignment); // 64-bit alignment
if (bin != kBinDexCacheArray) {
+ DCHECK(dex_cache_array_indexes_.find(object) == dex_cache_array_indexes_.end()) << object;
current_offset = bin_slot_sizes_[bin]; // How many bytes the current bin is at (aligned).
// Move the current bin size up to accommodate the object we just assigned a bin slot.
bin_slot_sizes_[bin] += offset_delta;
@@ -468,6 +494,15 @@ void ImageWriter::AssignImageBinSlot(mirror::Object* object) {
DCHECK_LT(image_end_, image_->Size());
}
+bool ImageWriter::WillMethodBeDirty(ArtMethod* m) const {
+ if (m->IsNative()) {
+ return true;
+ }
+ mirror::Class* declaring_class = m->GetDeclaringClass();
+  // Initialized classes are highly unlikely to dirty since there are no entry points to mutate.
+ return declaring_class == nullptr || declaring_class->GetStatus() != Class::kStatusInitialized;
+}
+
bool ImageWriter::IsImageBinSlotAssigned(mirror::Object* object) const {
DCHECK(object != nullptr);
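
// The old per-object method bins collapse into a clean/dirty split predicted per
// method by WillMethodBeDirty above: native methods get their entry points patched
// at registration time, and methods of uninitialized classes still mutate. Restated
// as a plain predicate:
inline bool PredictDirty(bool is_native, bool has_declaring_class, bool class_initialized) {
  if (is_native) {
    return true;
  }
  return !has_declaring_class || !class_initialized;
}
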
@@ -604,6 +639,9 @@ void ImageWriter::ComputeEagerResolvedStrings() {
}
bool ImageWriter::IsImageClass(Class* klass) {
+ if (klass == nullptr) {
+ return false;
+ }
std::string temp;
return compiler_driver_.IsImageClass(klass->GetDescriptor(&temp));
}
@@ -619,6 +657,7 @@ void ImageWriter::PruneNonImageClasses() {
}
Runtime* runtime = Runtime::Current();
ClassLinker* class_linker = runtime->GetClassLinker();
+ Thread* self = Thread::Current();
// Make a list of classes we would like to prune.
std::set<std::string> non_image_classes;
@@ -634,27 +673,45 @@ void ImageWriter::PruneNonImageClasses() {
}
// Clear references to removed classes from the DexCaches.
- ArtMethod* resolution_method = runtime->GetResolutionMethod();
- ReaderMutexLock mu(Thread::Current(), *class_linker->DexLock());
- size_t dex_cache_count = class_linker->GetDexCacheCount();
+ const ArtMethod* resolution_method = runtime->GetResolutionMethod();
+ size_t dex_cache_count;
+ {
+ ReaderMutexLock mu(self, *class_linker->DexLock());
+ dex_cache_count = class_linker->GetDexCacheCount();
+ }
for (size_t idx = 0; idx < dex_cache_count; ++idx) {
- DexCache* dex_cache = class_linker->GetDexCache(idx);
+ DexCache* dex_cache;
+ {
+ ReaderMutexLock mu(self, *class_linker->DexLock());
+ dex_cache = class_linker->GetDexCache(idx);
+ }
for (size_t i = 0; i < dex_cache->NumResolvedTypes(); i++) {
Class* klass = dex_cache->GetResolvedType(i);
if (klass != nullptr && !IsImageClass(klass)) {
dex_cache->SetResolvedType(i, nullptr);
}
}
- for (size_t i = 0; i < dex_cache->NumResolvedMethods(); i++) {
- ArtMethod* method = dex_cache->GetResolvedMethod(i);
- if (method != nullptr && !IsImageClass(method->GetDeclaringClass())) {
- dex_cache->SetResolvedMethod(i, resolution_method);
+ auto* resolved_methods = down_cast<mirror::PointerArray*>(dex_cache->GetResolvedMethods());
+ for (size_t i = 0, len = resolved_methods->GetLength(); i < len; i++) {
+ auto* method = resolved_methods->GetElementPtrSize<ArtMethod*>(i, target_ptr_size_);
+ if (method != nullptr) {
+ auto* declaring_class = method->GetDeclaringClass();
+        // Miranda methods may be held live by a class which is not an image class even
+        // though their declaring class is an image class. Set such entries to the
+        // resolution method to be safe and prevent dangling pointers.
+ if (method->IsMiranda() || !IsImageClass(declaring_class)) {
+ resolved_methods->SetElementPtrSize(i, resolution_method, target_ptr_size_);
+ } else {
+ // Check that the class is still in the classes table.
+ DCHECK(class_linker->ClassInClassTable(declaring_class)) << "Class "
+ << PrettyClass(declaring_class) << " not in class linker table";
+ }
}
}
for (size_t i = 0; i < dex_cache->NumResolvedFields(); i++) {
- ArtField* field = dex_cache->GetResolvedField(i, sizeof(void*));
+ ArtField* field = dex_cache->GetResolvedField(i, target_ptr_size_);
if (field != nullptr && !IsImageClass(field->GetDeclaringClass())) {
- dex_cache->SetResolvedField(i, nullptr, sizeof(void*));
+ dex_cache->SetResolvedField(i, nullptr, target_ptr_size_);
}
}
// Clean the dex field. It might have been populated during the initialization phase, but
@@ -757,19 +814,8 @@ ObjectArray<Object>* ImageWriter::CreateImageRoots() const {
}
// build an Object[] of the roots needed to restore the runtime
- Handle<ObjectArray<Object>> image_roots(hs.NewHandle(
+ auto image_roots(hs.NewHandle(
ObjectArray<Object>::Alloc(self, object_array_class.Get(), ImageHeader::kImageRootsMax)));
- image_roots->Set<false>(ImageHeader::kResolutionMethod, runtime->GetResolutionMethod());
- image_roots->Set<false>(ImageHeader::kImtConflictMethod, runtime->GetImtConflictMethod());
- image_roots->Set<false>(ImageHeader::kImtUnimplementedMethod,
- runtime->GetImtUnimplementedMethod());
- image_roots->Set<false>(ImageHeader::kDefaultImt, runtime->GetDefaultImt());
- image_roots->Set<false>(ImageHeader::kCalleeSaveMethod,
- runtime->GetCalleeSaveMethod(Runtime::kSaveAll));
- image_roots->Set<false>(ImageHeader::kRefsOnlySaveMethod,
- runtime->GetCalleeSaveMethod(Runtime::kRefsOnly));
- image_roots->Set<false>(ImageHeader::kRefsAndArgsSaveMethod,
- runtime->GetCalleeSaveMethod(Runtime::kRefsAndArgs));
image_roots->Set<false>(ImageHeader::kDexCaches, dex_caches.Get());
image_roots->Set<false>(ImageHeader::kClassRoots, class_linker->GetClassRoots());
for (int i = 0; i < ImageHeader::kImageRootsMax; i++) {
@@ -816,7 +862,7 @@ void ImageWriter::WalkFieldsInOrder(mirror::Object* obj) {
// Walk static fields of a Class.
if (h_obj->IsClass()) {
size_t num_reference_static_fields = klass->NumReferenceStaticFields();
- MemberOffset field_offset = klass->GetFirstReferenceStaticFieldOffset();
+ MemberOffset field_offset = klass->GetFirstReferenceStaticFieldOffset(target_ptr_size_);
for (size_t i = 0; i < num_reference_static_fields; ++i) {
mirror::Object* value = h_obj->GetFieldObject<mirror::Object>(field_offset);
if (value != nullptr) {
@@ -825,21 +871,38 @@ void ImageWriter::WalkFieldsInOrder(mirror::Object* obj) {
field_offset = MemberOffset(field_offset.Uint32Value() +
sizeof(mirror::HeapReference<mirror::Object>));
}
-
// Visit and assign offsets for fields.
- ArtField* fields[2] = { h_obj->AsClass()->GetSFields(), h_obj->AsClass()->GetIFields() };
- size_t num_fields[2] = { h_obj->AsClass()->NumStaticFields(),
- h_obj->AsClass()->NumInstanceFields() };
+ auto* as_klass = h_obj->AsClass();
+ ArtField* fields[] = { as_klass->GetSFields(), as_klass->GetIFields() };
+ size_t num_fields[] = { as_klass->NumStaticFields(), as_klass->NumInstanceFields() };
for (size_t i = 0; i < 2; ++i) {
for (size_t j = 0; j < num_fields[i]; ++j) {
auto* field = fields[i] + j;
- auto it = art_field_reloc_.find(field);
- CHECK(it == art_field_reloc_.end()) << "Field at index " << i << ":" << j
+ auto it = native_object_reloc_.find(field);
+ CHECK(it == native_object_reloc_.end()) << "Field at index " << i << ":" << j
<< " already assigned " << PrettyField(field);
- art_field_reloc_.emplace(field, bin_slot_sizes_[kBinArtField]);
+ native_object_reloc_.emplace(
+ field, NativeObjectReloc { bin_slot_sizes_[kBinArtField], kBinArtField });
bin_slot_sizes_[kBinArtField] += sizeof(ArtField);
}
}
+ // Visit and assign offsets for methods.
+ IterationRange<StrideIterator<ArtMethod>> method_arrays[] = {
+ as_klass->GetDirectMethods(target_ptr_size_),
+ as_klass->GetVirtualMethods(target_ptr_size_)
+ };
+ for (auto& array : method_arrays) {
+ bool any_dirty = false;
+ size_t count = 0;
+ for (auto& m : array) {
+ any_dirty = any_dirty || WillMethodBeDirty(&m);
+ ++count;
+ }
+ for (auto& m : array) {
+ AssignMethodOffset(&m, any_dirty ? kBinArtMethodDirty : kBinArtMethodClean);
+ }
+ (any_dirty ? dirty_methods_ : clean_methods_) += count;
+ }
} else if (h_obj->IsObjectArray()) {
// Walk elements of an object array.
int32_t length = h_obj->AsObjectArray<mirror::Object>()->GetLength();
@@ -854,6 +917,14 @@ void ImageWriter::WalkFieldsInOrder(mirror::Object* obj) {
}
}
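+// A plausible sketch of the WillMethodBeDirty heuristic used above, assuming it keys
+// off the same conditions documented for kBinArtMethodDirty in image_writer.h below
+// (native methods, or a declaring class whose initializer has not run); the real
+// implementation is not part of this hunk and may differ:
+//
+//   bool ImageWriter::WillMethodBeDirty(ArtMethod* m) const {
+//     if (m->IsNative()) {
+//       return true;  // The JNI entrypoint gets patched at runtime.
+//     }
+//     auto* declaring_class = m->GetDeclaringClass();
+//     // Uninitialized classes have their resolution stubs swapped out later.
+//     return declaring_class == nullptr || !declaring_class->IsInitialized();
+//   }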
+void ImageWriter::AssignMethodOffset(ArtMethod* method, Bin bin) {
+ auto it = native_object_reloc_.find(method);
+ CHECK(it == native_object_reloc_.end()) << "Method " << method << " already assigned "
+ << PrettyMethod(method);
+ native_object_reloc_.emplace(method, NativeObjectReloc { bin_slot_sizes_[bin], bin });
+ bin_slot_sizes_[bin] += ArtMethod::ObjectSize(target_ptr_size_);
+}
+
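+// Worked example with hypothetical sizes: a method assigned bin-relative offset
+// 0x40 in kBinArtMethodClean keeps that value until CalculateNewObjectOffsets()
+// adds image_objects_offset_begin_ + bin_slot_previous_sizes_[kBinArtMethodClean].
+// With image_objects_offset_begin_ == 0x1000 and 0x4800 bytes of objects and
+// ArtFields ahead of the clean-method bin, the final image offset is 0x5840.
+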
void ImageWriter::WalkFieldsCallback(mirror::Object* obj, void* arg) {
ImageWriter* writer = reinterpret_cast<ImageWriter*>(arg);
DCHECK(writer != nullptr);
@@ -879,11 +950,12 @@ void ImageWriter::UnbinObjectsIntoOffset(mirror::Object* obj) {
}
void ImageWriter::CalculateNewObjectOffsets() {
- Thread* self = Thread::Current();
+ Thread* const self = Thread::Current();
StackHandleScope<1> hs(self);
Handle<ObjectArray<Object>> image_roots(hs.NewHandle(CreateImageRoots()));
- gc::Heap* heap = Runtime::Current()->GetHeap();
+ auto* runtime = Runtime::Current();
+ auto* heap = runtime->GetHeap();
DCHECK_EQ(0U, image_end_);
// Leave space for the header, but do not write it yet, we need to
@@ -896,6 +968,21 @@ void ImageWriter::CalculateNewObjectOffsets() {
PrepareDexCacheArraySlots();
// Clear any pre-existing monitors which may have been in the monitor words, assign bin slots.
heap->VisitObjects(WalkFieldsCallback, this);
+ // Write the image runtime methods.
+ image_methods_[ImageHeader::kResolutionMethod] = runtime->GetResolutionMethod();
+ image_methods_[ImageHeader::kImtConflictMethod] = runtime->GetImtConflictMethod();
+ image_methods_[ImageHeader::kImtUnimplementedMethod] = runtime->GetImtUnimplementedMethod();
+ image_methods_[ImageHeader::kCalleeSaveMethod] = runtime->GetCalleeSaveMethod(Runtime::kSaveAll);
+ image_methods_[ImageHeader::kRefsOnlySaveMethod] =
+ runtime->GetCalleeSaveMethod(Runtime::kRefsOnly);
+ image_methods_[ImageHeader::kRefsAndArgsSaveMethod] =
+ runtime->GetCalleeSaveMethod(Runtime::kRefsAndArgs);
+ for (auto* m : image_methods_) {
+ CHECK(m != nullptr);
+ CHECK(m->IsRuntimeMethod());
+ AssignMethodOffset(m, kBinArtMethodDirty);
+ }
+
// Calculate cumulative bin slot sizes.
size_t previous_sizes = 0u;
for (size_t i = 0; i != kBinSize; ++i) {
@@ -913,7 +1000,14 @@ void ImageWriter::CalculateNewObjectOffsets() {
image_roots_address_ = PointerToLowMemUInt32(GetImageAddress(image_roots.Get()));
- // Note that image_end_ is left at end of used mirror space
+ // Update the native relocations by adding their bin sums.
+ for (auto& pair : native_object_reloc_) {
+ auto& native_reloc = pair.second;
+ native_reloc.offset += image_objects_offset_begin_ +
+ bin_slot_previous_sizes_[native_reloc.bin_type];
+ }
+
+ // Note that image_end_ is left at end of used mirror object section.
}
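+
+// The cumulative bin slot size pass started above is an exclusive prefix sum over
+// the bins; a minimal equivalent sketch (same fields, illustrative only):
+//
+//   size_t previous_sizes = 0u;
+//   for (size_t i = 0; i != kBinSize; ++i) {
+//     bin_slot_previous_sizes_[i] = previous_sizes;  // bytes in all earlier bins
+//     previous_sizes += bin_slot_sizes_[i];          // then account for this bin
+//   }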
void ImageWriter::CreateHeader(size_t oat_loaded_size, size_t oat_data_offset) {
@@ -922,47 +1016,87 @@ void ImageWriter::CreateHeader(size_t oat_loaded_size, size_t oat_data_offset) {
const uint8_t* oat_file_end = oat_file_begin + oat_loaded_size;
oat_data_begin_ = oat_file_begin + oat_data_offset;
const uint8_t* oat_data_end = oat_data_begin_ + oat_file_->Size();
- // Write out sections.
- size_t cur_pos = image_end_;
- // Add fields.
- auto fields_offset = cur_pos;
- CHECK_EQ(image_objects_offset_begin_ + GetBinSizeSum(kBinArtField), fields_offset);
- auto fields_size = bin_slot_sizes_[kBinArtField];
- cur_pos += fields_size;
- // Return to write header at start of image with future location of image_roots. At this point,
- // image_end_ is the size of the image (excluding bitmaps, ArtFields).
- /*
- const size_t heap_bytes_per_bitmap_byte = kBitsPerByte * kObjectAlignment;
- const size_t bitmap_bytes = RoundUp(image_end_, heap_bytes_per_bitmap_byte) /
- heap_bytes_per_bitmap_byte;
- */
+
+ // Create the image sections.
+ ImageSection sections[ImageHeader::kSectionCount];
+ // Objects section
+ auto* objects_section = &sections[ImageHeader::kSectionObjects];
+ *objects_section = ImageSection(0u, image_end_);
+ size_t cur_pos = objects_section->End();
+ // Add field section.
+ auto* field_section = &sections[ImageHeader::kSectionArtFields];
+ *field_section = ImageSection(cur_pos, bin_slot_sizes_[kBinArtField]);
+ CHECK_EQ(image_objects_offset_begin_ + bin_slot_previous_sizes_[kBinArtField],
+ field_section->Offset());
+ cur_pos = field_section->End();
+ // Add method section.
+ auto* methods_section = &sections[ImageHeader::kSectionArtMethods];
+ *methods_section = ImageSection(cur_pos, bin_slot_sizes_[kBinArtMethodClean] +
+ bin_slot_sizes_[kBinArtMethodDirty]);
+ CHECK_EQ(image_objects_offset_begin_ + bin_slot_previous_sizes_[kBinArtMethodClean],
+ methods_section->Offset());
+ cur_pos = methods_section->End();
+ // Finally bitmap section.
const size_t bitmap_bytes = image_bitmap_->Size();
- auto bitmap_offset = RoundUp(cur_pos, kPageSize);
- auto bitmap_size = RoundUp(bitmap_bytes, kPageSize);
- cur_pos += bitmap_size;
- new (image_->Begin()) ImageHeader(PointerToLowMemUInt32(image_begin_),
- static_cast<uint32_t>(image_end_),
- fields_offset, fields_size,
- bitmap_offset, bitmap_size,
- image_roots_address_,
- oat_file_->GetOatHeader().GetChecksum(),
- PointerToLowMemUInt32(oat_file_begin),
- PointerToLowMemUInt32(oat_data_begin_),
- PointerToLowMemUInt32(oat_data_end),
- PointerToLowMemUInt32(oat_file_end),
- compile_pic_);
+ auto* bitmap_section = &sections[ImageHeader::kSectionImageBitmap];
+ *bitmap_section = ImageSection(RoundUp(cur_pos, kPageSize), RoundUp(bitmap_bytes, kPageSize));
+ cur_pos = bitmap_section->End();
+ if (kIsDebugBuild) {
+ size_t idx = 0;
+ for (auto& section : sections) {
+ LOG(INFO) << static_cast<ImageHeader::ImageSections>(idx) << " " << section;
+ ++idx;
+ }
+ LOG(INFO) << "Methods: clean=" << clean_methods_ << " dirty=" << dirty_methods_;
+ }
+ // Create the header.
+ new (image_->Begin()) ImageHeader(
+ PointerToLowMemUInt32(image_begin_), static_cast<uint32_t>(methods_section->End()), sections,
+ image_roots_address_, oat_file_->GetOatHeader().GetChecksum(),
+ PointerToLowMemUInt32(oat_file_begin), PointerToLowMemUInt32(oat_data_begin_),
+ PointerToLowMemUInt32(oat_data_end), PointerToLowMemUInt32(oat_file_end), target_ptr_size_,
+ compile_pic_);
+}
+
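+// Resulting file layout, with hypothetical sizes for illustration: objects occupy
+// [0, image_end_), e.g. [0, 0x20000); ArtFields follow at [0x20000, 0x21000);
+// ArtMethods (clean, then dirty) at [0x21000, 0x22800); the bitmap then starts at
+// RoundUp(0x22800, kPageSize) == 0x23000 on a 4 KiB page target. The header's image
+// size field is methods_section->End(), i.e. everything before the bitmap.
+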
+ArtMethod* ImageWriter::GetImageMethodAddress(ArtMethod* method) {
+ auto it = native_object_reloc_.find(method);
+ CHECK(it != native_object_reloc_.end()) << PrettyMethod(method) << " @ " << method;
+ CHECK_GE(it->second.offset, image_end_) << "ArtMethods should be after Objects";
+ return reinterpret_cast<ArtMethod*>(image_begin_ + it->second.offset);
}
void ImageWriter::CopyAndFixupNativeData() {
- // Copy ArtFields to their locations and update the array for convenience.
- auto fields_offset = image_objects_offset_begin_ + GetBinSizeSum(kBinArtField);
- for (auto& pair : art_field_reloc_) {
- pair.second += fields_offset;
- auto* dest = image_->Begin() + pair.second;
- DCHECK_GE(dest, image_->Begin() + image_end_);
- memcpy(dest, pair.first, sizeof(ArtField));
- reinterpret_cast<ArtField*>(dest)->SetDeclaringClass(
- down_cast<Class*>(GetImageAddress(pair.first->GetDeclaringClass())));
+  // Copy ArtFields and methods to their final image locations.
+ for (auto& pair : native_object_reloc_) {
+ auto& native_reloc = pair.second;
+ if (native_reloc.bin_type == kBinArtField) {
+ auto* dest = image_->Begin() + native_reloc.offset;
+ DCHECK_GE(dest, image_->Begin() + image_end_);
+ memcpy(dest, pair.first, sizeof(ArtField));
+ reinterpret_cast<ArtField*>(dest)->SetDeclaringClass(
+ GetImageAddress(reinterpret_cast<ArtField*>(pair.first)->GetDeclaringClass()));
+ } else {
+ CHECK(IsArtMethodBin(native_reloc.bin_type)) << native_reloc.bin_type;
+ auto* dest = image_->Begin() + native_reloc.offset;
+ DCHECK_GE(dest, image_->Begin() + image_end_);
+ CopyAndFixupMethod(reinterpret_cast<ArtMethod*>(pair.first),
+ reinterpret_cast<ArtMethod*>(dest));
+ }
+ }
+ // Fixup the image method roots.
+ auto* image_header = reinterpret_cast<ImageHeader*>(image_->Begin());
+ const auto& methods_section = image_header->GetMethodsSection();
+ for (size_t i = 0; i < ImageHeader::kImageMethodsCount; ++i) {
+ auto* m = image_methods_[i];
+ CHECK(m != nullptr);
+ auto it = native_object_reloc_.find(m);
+    CHECK(it != native_object_reloc_.end()) << "No forwarding for " << PrettyMethod(m);
+ auto& native_reloc = it->second;
+ CHECK(methods_section.Contains(native_reloc.offset)) << native_reloc.offset << " not in "
+ << methods_section;
+ CHECK(IsArtMethodBin(native_reloc.bin_type)) << native_reloc.bin_type;
+ auto* dest = reinterpret_cast<ArtMethod*>(image_begin_ + it->second.offset);
+ image_header->SetImageMethod(static_cast<ImageHeader::ImageMethod>(i), dest);
}
}
@@ -984,58 +1118,37 @@ void ImageWriter::CopyAndFixupObjectsCallback(Object* obj, void* arg) {
reinterpret_cast<ImageWriter*>(arg)->CopyAndFixupObject(obj);
}
-bool ImageWriter::CopyAndFixupIfDexCacheFieldArray(mirror::Object* dst, mirror::Object* obj,
- mirror::Class* klass) {
- if (!klass->IsArrayClass()) {
- return false;
- }
- auto* component_type = klass->GetComponentType();
- bool is_int_arr = component_type->IsPrimitiveInt();
- bool is_long_arr = component_type->IsPrimitiveLong();
- if (!is_int_arr && !is_long_arr) {
- return false;
- }
- auto it = dex_cache_array_indexes_.find(obj); // Is this a dex cache array?
- if (it == dex_cache_array_indexes_.end()) {
- return false;
- }
- mirror::Array* arr = obj->AsArray();
- CHECK_EQ(reinterpret_cast<Object*>(
- image_->Begin() + it->second.offset_ + image_objects_offset_begin_), dst);
- dex_cache_array_indexes_.erase(it);
- // Fixup int pointers for the field array.
- CHECK(!arr->IsObjectArray());
+void ImageWriter::FixupPointerArray(mirror::Object* dst, mirror::PointerArray* arr,
+ mirror::Class* klass, Bin array_type) {
+ CHECK(klass->IsArrayClass());
+ CHECK(arr->IsIntArray() || arr->IsLongArray()) << PrettyClass(klass) << " " << arr;
+ // Fixup int and long pointers for the ArtMethod or ArtField arrays.
const size_t num_elements = arr->GetLength();
- if (target_ptr_size_ == 4) {
- // Will get fixed up by fixup object.
- dst->SetClass(down_cast<mirror::Class*>(
- GetImageAddress(mirror::IntArray::GetArrayClass())));
- } else {
- DCHECK_EQ(target_ptr_size_, 8u);
- dst->SetClass(down_cast<mirror::Class*>(
- GetImageAddress(mirror::LongArray::GetArrayClass())));
- }
- mirror::Array* dest_array = down_cast<mirror::Array*>(dst);
- dest_array->SetLength(num_elements);
+ dst->SetClass(GetImageAddress(arr->GetClass()));
+ auto* dest_array = down_cast<mirror::PointerArray*>(dst);
for (size_t i = 0, count = num_elements; i < count; ++i) {
- ArtField* field = reinterpret_cast<ArtField*>(is_int_arr ?
- arr->AsIntArray()->GetWithoutChecks(i) : arr->AsLongArray()->GetWithoutChecks(i));
- uint8_t* fixup_location = nullptr;
- if (field != nullptr) {
- auto it2 = art_field_reloc_.find(field);
- CHECK(it2 != art_field_reloc_.end()) << "No relocation for field " << PrettyField(field);
- fixup_location = image_begin_ + it2->second;
- }
- if (target_ptr_size_ == 4) {
- down_cast<mirror::IntArray*>(dest_array)->SetWithoutChecks<kVerifyNone>(
- i, static_cast<uint32_t>(reinterpret_cast<uint64_t>(fixup_location)));
- } else {
- down_cast<mirror::LongArray*>(dest_array)->SetWithoutChecks<kVerifyNone>(
- i, reinterpret_cast<uint64_t>(fixup_location));
+ auto* elem = arr->GetElementPtrSize<void*>(i, target_ptr_size_);
+ if (elem != nullptr) {
+ auto it = native_object_reloc_.find(elem);
+ if (it == native_object_reloc_.end()) {
+ if (IsArtMethodBin(array_type)) {
+ auto* method = reinterpret_cast<ArtMethod*>(elem);
+ LOG(FATAL) << "No relocation entry for ArtMethod " << PrettyMethod(method) << " @ "
+ << method << " idx=" << i << "/" << num_elements << " with declaring class "
+ << PrettyClass(method->GetDeclaringClass());
+ } else {
+ CHECK_EQ(array_type, kBinArtField);
+ auto* field = reinterpret_cast<ArtField*>(elem);
+ LOG(FATAL) << "No relocation entry for ArtField " << PrettyField(field) << " @ "
+ << field << " idx=" << i << "/" << num_elements << " with declaring class "
+ << PrettyClass(field->GetDeclaringClass());
+ }
+ } else {
+ elem = image_begin_ + it->second.offset;
+ }
}
+ dest_array->SetElementPtrSize<false, true>(i, elem, target_ptr_size_);
}
- dst->SetLockWord(LockWord::Default(), false);
- return true;
}
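+// GetElementPtrSize/SetElementPtrSize hide the int-array vs. long-array encoding; a
+// simplified sketch of the read side under that assumption (the real template in
+// mirror::PointerArray also carries verification flags):
+//
+//   template <typename T>
+//   T GetElementPtrSize(uint32_t idx, size_t ptr_size) {
+//     if (ptr_size == 8u) {
+//       return reinterpret_cast<T>(
+//           static_cast<uintptr_t>(AsLongArray()->GetWithoutChecks(idx)));
+//     }
+//     DCHECK_EQ(ptr_size, 4u);
+//     return reinterpret_cast<T>(
+//         static_cast<uintptr_t>(AsIntArray()->GetWithoutChecks(idx)));
+//   }
+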
void ImageWriter::CopyAndFixupObject(Object* obj) {
@@ -1043,19 +1156,8 @@ void ImageWriter::CopyAndFixupObject(Object* obj) {
size_t offset = GetImageOffset(obj);
auto* dst = reinterpret_cast<Object*>(image_->Begin() + offset);
const uint8_t* src = reinterpret_cast<const uint8_t*>(obj);
- size_t n;
- mirror::Class* klass = obj->GetClass();
- if (CopyAndFixupIfDexCacheFieldArray(dst, obj, klass)) {
- return;
- }
- if (klass->IsArtMethodClass()) {
- // Size without pointer fields since we don't want to overrun the buffer if target art method
- // is 32 bits but source is 64 bits.
- n = mirror::ArtMethod::SizeWithoutPointerFields(target_ptr_size_);
- } else {
- n = obj->SizeOf();
- }
+ size_t n = obj->SizeOf();
DCHECK_LE(offset + n, image_->Size());
memcpy(dst, src, n);
@@ -1114,23 +1216,51 @@ class FixupClassVisitor FINAL : public FixupVisitor {
void ImageWriter::FixupClass(mirror::Class* orig, mirror::Class* copy) {
// Copy and fix up ArtFields in the class.
- ArtField* fields[2] = { orig->AsClass()->GetSFields(), orig->AsClass()->GetIFields() };
- size_t num_fields[2] = { orig->AsClass()->NumStaticFields(),
- orig->AsClass()->NumInstanceFields() };
- // Update the arrays.
+ ArtField* fields[2] = { orig->GetSFields(), orig->GetIFields() };
+ size_t num_fields[2] = { orig->NumStaticFields(), orig->NumInstanceFields() };
+ // Update the field arrays.
for (size_t i = 0; i < 2; ++i) {
if (num_fields[i] == 0) {
CHECK(fields[i] == nullptr);
continue;
}
- auto it = art_field_reloc_.find(fields[i]);
- CHECK(it != art_field_reloc_.end()) << PrettyClass(orig->AsClass()) << " : "
- << PrettyField(fields[i]);
- auto* image_fields = reinterpret_cast<ArtField*>(image_begin_ + it->second);
+ auto it = native_object_reloc_.find(fields[i]);
+ CHECK(it != native_object_reloc_.end()) << PrettyClass(orig) << " : " << PrettyField(fields[i]);
+ auto* image_fields = reinterpret_cast<ArtField*>(image_begin_ + it->second.offset);
if (i == 0) {
- down_cast<Class*>(copy)->SetSFieldsUnchecked(image_fields);
+ copy->SetSFieldsUnchecked(image_fields);
} else {
- down_cast<Class*>(copy)->SetIFieldsUnchecked(image_fields);
+ copy->SetIFieldsUnchecked(image_fields);
+ }
+ }
+ // Update direct / virtual method arrays.
+ auto* direct_methods = orig->GetDirectMethodsPtr();
+ if (direct_methods != nullptr) {
+ auto it = native_object_reloc_.find(direct_methods);
+ CHECK(it != native_object_reloc_.end()) << PrettyClass(orig);
+ copy->SetDirectMethodsPtrUnchecked(
+ reinterpret_cast<ArtMethod*>(image_begin_ + it->second.offset));
+ }
+ auto* virtual_methods = orig->GetVirtualMethodsPtr();
+ if (virtual_methods != nullptr) {
+ auto it = native_object_reloc_.find(virtual_methods);
+ CHECK(it != native_object_reloc_.end()) << PrettyClass(orig);
+ copy->SetVirtualMethodsPtr(
+ reinterpret_cast<ArtMethod*>(image_begin_ + it->second.offset));
+ }
+ // Fix up embedded tables.
+ if (orig->ShouldHaveEmbeddedImtAndVTable()) {
+ for (int32_t i = 0; i < orig->GetEmbeddedVTableLength(); ++i) {
+ auto it = native_object_reloc_.find(orig->GetEmbeddedVTableEntry(i, target_ptr_size_));
+ CHECK(it != native_object_reloc_.end()) << PrettyClass(orig);
+ copy->SetEmbeddedVTableEntryUnchecked(
+ i, reinterpret_cast<ArtMethod*>(image_begin_ + it->second.offset), target_ptr_size_);
+ }
+ for (size_t i = 0; i < mirror::Class::kImtSize; ++i) {
+ auto it = native_object_reloc_.find(orig->GetEmbeddedImTableEntry(i, target_ptr_size_));
+ CHECK(it != native_object_reloc_.end()) << PrettyClass(orig);
+ copy->SetEmbeddedImTableEntry(
+ i, reinterpret_cast<ArtMethod*>(image_begin_ + it->second.offset), target_ptr_size_);
}
}
FixupClassVisitor visitor(this, copy);
@@ -1148,18 +1278,39 @@ void ImageWriter::FixupObject(Object* orig, Object* copy) {
DCHECK_EQ(copy->GetReadBarrierPointer(), GetImageAddress(orig));
}
}
+ auto* klass = orig->GetClass();
+ if (klass->IsIntArrayClass() || klass->IsLongArrayClass()) {
+ // Is this a native dex cache array?
+ auto it = pointer_arrays_.find(down_cast<mirror::PointerArray*>(orig));
+ if (it != pointer_arrays_.end()) {
+ // Should only need to fixup every pointer array exactly once.
+ FixupPointerArray(copy, down_cast<mirror::PointerArray*>(orig), klass, it->second);
+ pointer_arrays_.erase(it);
+ return;
+ }
+ CHECK(dex_cache_array_indexes_.find(orig) == dex_cache_array_indexes_.end())
+ << "Should have been pointer array.";
+ }
if (orig->IsClass()) {
FixupClass(orig->AsClass<kVerifyNone>(), down_cast<mirror::Class*>(copy));
} else {
+ if (klass == mirror::Method::StaticClass() || klass == mirror::Constructor::StaticClass()) {
+ // Need to go update the ArtMethod.
+ auto* dest = down_cast<mirror::AbstractMethod*>(copy);
+ auto* src = down_cast<mirror::AbstractMethod*>(orig);
+ ArtMethod* src_method = src->GetArtMethod();
+ auto it = native_object_reloc_.find(src_method);
+ CHECK(it != native_object_reloc_.end()) << "Missing relocation for AbstractMethod.artMethod "
+ << PrettyMethod(src_method);
+ dest->SetArtMethod(
+ reinterpret_cast<ArtMethod*>(image_begin_ + it->second.offset));
+ }
FixupVisitor visitor(this, copy);
orig->VisitReferences<true /*visit class*/>(visitor, visitor);
}
- if (orig->IsArtMethod<kVerifyNone>()) {
- FixupMethod(orig->AsArtMethod<kVerifyNone>(), down_cast<ArtMethod*>(copy));
- }
}
-const uint8_t* ImageWriter::GetQuickCode(mirror::ArtMethod* method, bool* quick_is_interpreted) {
+const uint8_t* ImageWriter::GetQuickCode(ArtMethod* method, bool* quick_is_interpreted) {
DCHECK(!method->IsResolutionMethod() && !method->IsImtConflictMethod() &&
!method->IsImtUnimplementedMethod() && !method->IsAbstract()) << PrettyMethod(method);
@@ -1171,27 +1322,31 @@ const uint8_t* ImageWriter::GetQuickCode(mirror::ArtMethod* method, bool* quick_
method->GetEntryPointFromQuickCompiledCodePtrSize(target_ptr_size_));
const uint8_t* quick_code = GetOatAddress(quick_oat_code_offset);
*quick_is_interpreted = false;
- if (quick_code != nullptr &&
- (!method->IsStatic() || method->IsConstructor() || method->GetDeclaringClass()->IsInitialized())) {
+ if (quick_code != nullptr && (!method->IsStatic() || method->IsConstructor() ||
+ method->GetDeclaringClass()->IsInitialized())) {
// We have code for a non-static or initialized method, just use the code.
+ DCHECK_GE(quick_code, oat_data_begin_);
} else if (quick_code == nullptr && method->IsNative() &&
(!method->IsStatic() || method->GetDeclaringClass()->IsInitialized())) {
// Non-static or initialized native method missing compiled code, use generic JNI version.
quick_code = GetOatAddress(quick_generic_jni_trampoline_offset_);
+ DCHECK_GE(quick_code, oat_data_begin_);
} else if (quick_code == nullptr && !method->IsNative()) {
// We don't have code at all for a non-native method, use the interpreter.
quick_code = GetOatAddress(quick_to_interpreter_bridge_offset_);
*quick_is_interpreted = true;
+ DCHECK_GE(quick_code, oat_data_begin_);
} else {
CHECK(!method->GetDeclaringClass()->IsInitialized());
// We have code for a static method, but need to go through the resolution stub for class
// initialization.
quick_code = GetOatAddress(quick_resolution_trampoline_offset_);
+ DCHECK_GE(quick_code, oat_data_begin_);
}
return quick_code;
}
-const uint8_t* ImageWriter::GetQuickEntryPoint(mirror::ArtMethod* method) {
+const uint8_t* ImageWriter::GetQuickEntryPoint(ArtMethod* method) {
  // Calculate the quick entry point following the same logic as CopyAndFixupMethod() below.
// The resolution method has a special trampoline to call.
Runtime* runtime = Runtime::Current();
@@ -1213,50 +1368,57 @@ const uint8_t* ImageWriter::GetQuickEntryPoint(mirror::ArtMethod* method) {
}
}
-void ImageWriter::FixupMethod(ArtMethod* orig, ArtMethod* copy) {
+void ImageWriter::CopyAndFixupMethod(ArtMethod* orig, ArtMethod* copy) {
+ memcpy(copy, orig, ArtMethod::ObjectSize(target_ptr_size_));
+
+ copy->SetDeclaringClass(GetImageAddress(orig->GetDeclaringClassUnchecked()));
+ copy->SetDexCacheResolvedMethods(GetImageAddress(orig->GetDexCacheResolvedMethods()));
+ copy->SetDexCacheResolvedTypes(GetImageAddress(orig->GetDexCacheResolvedTypes()));
+
// OatWriter replaces the code_ with an offset value. Here we re-adjust to a pointer relative to
// oat_begin_
- // For 64 bit targets we need to repack the current runtime pointer sized fields to the right
- // locations.
- // Copy all of the fields from the runtime methods to the target methods first since we did a
- // bytewise copy earlier.
- copy->SetEntryPointFromInterpreterPtrSize<kVerifyNone>(
- orig->GetEntryPointFromInterpreterPtrSize(target_ptr_size_), target_ptr_size_);
- copy->SetEntryPointFromJniPtrSize<kVerifyNone>(
- orig->GetEntryPointFromJniPtrSize(target_ptr_size_), target_ptr_size_);
- copy->SetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>(
- orig->GetEntryPointFromQuickCompiledCodePtrSize(target_ptr_size_), target_ptr_size_);
// The resolution method has a special trampoline to call.
Runtime* runtime = Runtime::Current();
if (UNLIKELY(orig == runtime->GetResolutionMethod())) {
- copy->SetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>(
+ copy->SetEntryPointFromQuickCompiledCodePtrSize(
GetOatAddress(quick_resolution_trampoline_offset_), target_ptr_size_);
} else if (UNLIKELY(orig == runtime->GetImtConflictMethod() ||
orig == runtime->GetImtUnimplementedMethod())) {
- copy->SetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>(
+ copy->SetEntryPointFromQuickCompiledCodePtrSize(
GetOatAddress(quick_imt_conflict_trampoline_offset_), target_ptr_size_);
+ } else if (UNLIKELY(orig->IsRuntimeMethod())) {
+ bool found_one = false;
+ for (size_t i = 0; i < static_cast<size_t>(Runtime::kLastCalleeSaveType); ++i) {
+ auto idx = static_cast<Runtime::CalleeSaveType>(i);
+ if (runtime->HasCalleeSaveMethod(idx) && runtime->GetCalleeSaveMethod(idx) == orig) {
+ found_one = true;
+ break;
+ }
+ }
+ CHECK(found_one) << "Expected to find callee save method but got " << PrettyMethod(orig);
+ CHECK(copy->IsRuntimeMethod());
} else {
// We assume all methods have code. If they don't currently then we set them to the use the
// resolution trampoline. Abstract methods never have code and so we need to make sure their
// use results in an AbstractMethodError. We use the interpreter to achieve this.
if (UNLIKELY(orig->IsAbstract())) {
- copy->SetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>(
+ copy->SetEntryPointFromQuickCompiledCodePtrSize(
GetOatAddress(quick_to_interpreter_bridge_offset_), target_ptr_size_);
- copy->SetEntryPointFromInterpreterPtrSize<kVerifyNone>(
+ copy->SetEntryPointFromInterpreterPtrSize(
reinterpret_cast<EntryPointFromInterpreter*>(const_cast<uint8_t*>(
GetOatAddress(interpreter_to_interpreter_bridge_offset_))), target_ptr_size_);
} else {
bool quick_is_interpreted;
const uint8_t* quick_code = GetQuickCode(orig, &quick_is_interpreted);
- copy->SetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>(quick_code, target_ptr_size_);
+ copy->SetEntryPointFromQuickCompiledCodePtrSize(quick_code, target_ptr_size_);
// JNI entrypoint:
if (orig->IsNative()) {
// The native method's pointer is set to a stub to lookup via dlsym.
// Note this is not the code_ pointer, that is handled above.
- copy->SetEntryPointFromJniPtrSize<kVerifyNone>(GetOatAddress(jni_dlsym_lookup_offset_),
- target_ptr_size_);
+ copy->SetEntryPointFromJniPtrSize(
+ GetOatAddress(jni_dlsym_lookup_offset_), target_ptr_size_);
}
// Interpreter entrypoint:
@@ -1267,8 +1429,7 @@ void ImageWriter::FixupMethod(ArtMethod* orig, ArtMethod* copy) {
EntryPointFromInterpreter* interpreter_entrypoint =
reinterpret_cast<EntryPointFromInterpreter*>(
const_cast<uint8_t*>(GetOatAddress(interpreter_code)));
- copy->SetEntryPointFromInterpreterPtrSize<kVerifyNone>(
- interpreter_entrypoint, target_ptr_size_);
+ copy->SetEntryPointFromInterpreterPtrSize(interpreter_entrypoint, target_ptr_size_);
}
}
}
@@ -1305,8 +1466,8 @@ size_t ImageWriter::GetBinSizeSum(ImageWriter::Bin up_to) const {
ImageWriter::BinSlot::BinSlot(uint32_t lockword) : lockword_(lockword) {
// These values may need to get updated if more bins are added to the enum Bin
- static_assert(kBinBits == 4, "wrong number of bin bits");
- static_assert(kBinShift == 28, "wrong number of shift");
+ static_assert(kBinBits == 3, "wrong number of bin bits");
+ static_assert(kBinShift == 27, "wrong number of shift");
static_assert(sizeof(BinSlot) == sizeof(LockWord), "BinSlot/LockWord must have equal sizes");
DCHECK_LT(GetBin(), kBinSize);
@@ -1326,13 +1487,4 @@ uint32_t ImageWriter::BinSlot::GetIndex() const {
return lockword_ & ~kBinMask;
}
-void ImageWriter::FreeStringDataArray() {
- if (string_data_array_ != nullptr) {
- gc::space::LargeObjectSpace* los = Runtime::Current()->GetHeap()->GetLargeObjectsSpace();
- if (los != nullptr) {
- los->Free(Thread::Current(), reinterpret_cast<mirror::Object*>(string_data_array_));
- }
- }
-}
-
} // namespace art
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index 5921732..a35d6ad 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -30,12 +30,13 @@
#include "base/macros.h"
#include "driver/compiler_driver.h"
#include "gc/space/space.h"
+#include "lock_word.h"
#include "mem_map.h"
#include "oat_file.h"
#include "mirror/dex_cache.h"
#include "os.h"
#include "safe_map.h"
-#include "gc/space/space.h"
+#include "utils.h"
namespace art {
@@ -53,18 +54,12 @@ class ImageWriter FINAL {
quick_to_interpreter_bridge_offset_(0), compile_pic_(compile_pic),
target_ptr_size_(InstructionSetPointerSize(compiler_driver_.GetInstructionSet())),
bin_slot_sizes_(), bin_slot_previous_sizes_(), bin_slot_count_(),
- string_data_array_(nullptr) {
+ dirty_methods_(0u), clean_methods_(0u) {
CHECK_NE(image_begin, 0U);
+ std::fill(image_methods_, image_methods_ + arraysize(image_methods_), nullptr);
}
~ImageWriter() {
- // For interned strings a large array is allocated to hold all the character data and avoid
- // overhead. However, no GC is run anymore at this point. As the array is likely large, it
- // will be allocated in the large object space, where valgrind can track every single
- // allocation. Not explicitly freeing that array will be recognized as a leak.
- if (RUNNING_ON_VALGRIND != 0) {
- FreeStringDataArray();
- }
}
bool PrepareImageAddressSpace();
@@ -73,14 +68,14 @@ class ImageWriter FINAL {
return image_roots_address_ != 0u;
}
- mirror::Object* GetImageAddress(mirror::Object* object) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (object == nullptr) {
- return nullptr;
- }
- return reinterpret_cast<mirror::Object*>(image_begin_ + GetImageOffset(object));
+ template <typename T>
+ T* GetImageAddress(T* object) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return object == nullptr ? nullptr :
+ reinterpret_cast<T*>(image_begin_ + GetImageOffset(object));
}
+ ArtMethod* GetImageMethodAddress(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
mirror::HeapReference<mirror::Object>* GetDexCacheArrayElementImageAddress(
const DexFile* dex_file, uint32_t offset) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
auto it = dex_cache_array_starts_.find(dex_file);
@@ -90,11 +85,12 @@ class ImageWriter FINAL {
}
uint8_t* GetOatFileBegin() const {
- return image_begin_ + RoundUp(image_end_ + bin_slot_sizes_[kBinArtField], kPageSize);
+ return image_begin_ + RoundUp(
+ image_end_ + bin_slot_sizes_[kBinArtField] + bin_slot_sizes_[kBinArtMethodDirty] +
+ bin_slot_sizes_[kBinArtMethodClean], kPageSize);
}
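+  // Example with the same hypothetical sizes as the section layout above: with
+  // image_end_ == 0x20000, 0x1000 of ArtFields and 0x1800 of ArtMethods, the oat
+  // file is expected at image_begin_ + RoundUp(0x22800, kPageSize), i.e.
+  // image_begin_ + 0x23000, immediately after the page-aligned native data.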
- bool Write(const std::string& image_filename,
- const std::string& oat_filename,
+ bool Write(const std::string& image_filename, const std::string& oat_filename,
const std::string& oat_location)
LOCKS_EXCLUDED(Locks::mutator_lock_);
@@ -124,11 +120,15 @@ class ImageWriter FINAL {
kBinClassInitializedFinalStatics, // Class initializers have been run, no non-final statics
kBinClassInitialized, // Class initializers have been run
kBinClassVerified, // Class verified, but initializers haven't been run
- kBinArtMethodNative, // Art method that is actually native
- kBinArtMethodNotInitialized, // Art method with a declaring class that wasn't initialized
// Add more bins here if we add more segregation code.
- // Non mirror fields must be below. ArtFields should be always clean.
+  // Non-mirror fields must be below.
+  // ArtFields should always be clean.
kBinArtField,
+ // If the class is initialized, then the ArtMethods are probably clean.
+ kBinArtMethodClean,
+ // ArtMethods may be dirty if the class has native methods or a declaring class that isn't
+ // initialized.
+ kBinArtMethodDirty,
kBinSize,
// Number of bins which are for mirror objects.
kBinMirrorCount = kBinArtField,
@@ -138,9 +138,12 @@ class ImageWriter FINAL {
static constexpr size_t kBinBits = MinimumBitsToStore<uint32_t>(kBinMirrorCount - 1);
// uint32 = typeof(lockword_)
- static constexpr size_t kBinShift = BitSizeOf<uint32_t>() - kBinBits;
+ // Subtract read barrier bits since we want these to remain 0, or else it may result in DCHECK
+ // failures due to invalid read barrier bits during object field reads.
+ static const size_t kBinShift = BitSizeOf<uint32_t>() - kBinBits -
+ LockWord::kReadBarrierStateSize;
  // 00111000.....0 (the top two read barrier bits stay clear)
- static constexpr size_t kBinMask = ((static_cast<size_t>(1) << kBinBits) - 1) << kBinShift;
+ static const size_t kBinMask = ((static_cast<size_t>(1) << kBinBits) - 1) << kBinShift;
// We use the lock word to store the bin # and bin index of the object in the image.
//
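+  // Concrete layout of the 32-bit lock word under this scheme, derived from the
+  // constants above (kBinBits == 3 for the current bin count and
+  // LockWord::kReadBarrierStateSize == 2, so kBinShift == 32 - 3 - 2 == 27):
+  //   bits 31..30: read barrier state, kept zero
+  //   bits 29..27: bin number (kBinMask == 0x38000000)
+  //   bits 26..0:  index of the object within its bin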
@@ -172,6 +175,8 @@ class ImageWriter FINAL {
bool IsImageOffsetAssigned(mirror::Object* object) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
size_t GetImageOffset(mirror::Object* object) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void UpdateImageOffset(mirror::Object* obj, uintptr_t offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void PrepareDexCacheArraySlots() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void AssignImageBinSlot(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -181,6 +186,8 @@ class ImageWriter FINAL {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
BinSlot GetImageBinSlot(mirror::Object* object) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void AddMethodPointerArray(mirror::PointerArray* arr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
static void* GetImageAddressCallback(void* writer, mirror::Object* obj)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return reinterpret_cast<ImageWriter*>(writer)->GetImageAddress(obj);
@@ -197,10 +204,12 @@ class ImageWriter FINAL {
    // With Quick, code is within the OatFile, as they are all in one
// .o ELF object.
DCHECK_LT(offset, oat_file_->Size());
- if (offset == 0u) {
- return nullptr;
- }
- return oat_data_begin_ + offset;
+ DCHECK(oat_data_begin_ != nullptr);
+ return offset == 0u ? nullptr : oat_data_begin_ + offset;
+ }
+
+ static bool IsArtMethodBin(Bin bin) {
+ return bin == kBinArtMethodClean || bin == kBinArtMethodDirty;
}
// Returns true if the class was in the original requested image classes list.
@@ -257,21 +266,20 @@ class ImageWriter FINAL {
static void CopyAndFixupObjectsCallback(mirror::Object* obj, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void CopyAndFixupObject(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool CopyAndFixupIfDexCacheFieldArray(mirror::Object* dst, mirror::Object* obj,
- mirror::Class* klass)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void FixupMethod(mirror::ArtMethod* orig, mirror::ArtMethod* copy)
+ void CopyAndFixupMethod(ArtMethod* orig, ArtMethod* copy)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void FixupClass(mirror::Class* orig, mirror::Class* copy)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void FixupObject(mirror::Object* orig, mirror::Object* copy)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void FixupPointerArray(mirror::Object* dst, mirror::PointerArray* arr, mirror::Class* klass,
+ Bin array_type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Get quick code for non-resolution/imt_conflict/abstract method.
- const uint8_t* GetQuickCode(mirror::ArtMethod* method, bool* quick_is_interpreted)
+ const uint8_t* GetQuickCode(ArtMethod* method, bool* quick_is_interpreted)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const uint8_t* GetQuickEntryPoint(mirror::ArtMethod* method)
+ const uint8_t* GetQuickEntryPoint(ArtMethod* method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Patches references in OatFile to expect runtime addresses.
@@ -280,8 +288,11 @@ class ImageWriter FINAL {
// Calculate the sum total of the bin slot sizes in [0, up_to). Defaults to all bins.
size_t GetBinSizeSum(Bin up_to = kBinSize) const;
- // Release the string_data_array_.
- void FreeStringDataArray();
+ // Return true if a method is likely to be dirtied at runtime.
+ bool WillMethodBeDirty(ArtMethod* m) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Assign the offset for an ArtMethod.
+ void AssignMethodOffset(ArtMethod* method, Bin bin) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
const CompilerDriver& compiler_driver_;
@@ -308,9 +319,14 @@ class ImageWriter FINAL {
struct DexCacheArrayLocation {
size_t offset_;
size_t length_;
+ Bin bin_type_;
};
SafeMap<mirror::Object*, DexCacheArrayLocation> dex_cache_array_indexes_;
+  // Pointer arrays that need to be updated. Since only some int and long arrays are really
+  // pointer arrays, we need to track them explicitly. These include vtable arrays, iftable
+  // arrays, and dex caches.
+ std::unordered_map<mirror::PointerArray*, Bin> pointer_arrays_;
+
// The start offsets of the dex cache arrays.
SafeMap<const DexFile*, size_t> dex_cache_array_starts_;
@@ -344,12 +360,21 @@ class ImageWriter FINAL {
size_t bin_slot_previous_sizes_[kBinSize]; // Number of bytes in previous bins.
size_t bin_slot_count_[kBinSize]; // Number of objects in a bin
- // ArtField relocating map, ArtFields are allocated as array of structs but we want to have one
- // entry per art field for convenience.
- // ArtFields are placed right after the end of the image objects (aka sum of bin_slot_sizes_).
- std::unordered_map<ArtField*, uintptr_t> art_field_reloc_;
+  // ArtField and ArtMethod relocation map. These are allocated as arrays of structs, but we
+  // want one map entry per field and per method for convenience. ArtFields are placed right
+  // after the end of the image objects (aka the sum of bin_slot_sizes_); ArtMethods come right
+  // after the ArtFields.
+ struct NativeObjectReloc {
+ uintptr_t offset;
+ Bin bin_type;
+ };
+ std::unordered_map<void*, NativeObjectReloc> native_object_reloc_;
+
+ // Runtime ArtMethods which aren't reachable from any Class but need to be copied into the image.
+ ArtMethod* image_methods_[ImageHeader::kImageMethodsCount];
- void* string_data_array_; // The backing for the interned strings.
+ // Counters for measurements, used for logging only.
+ uint64_t dirty_methods_;
+ uint64_t clean_methods_;
friend class FixupVisitor;
friend class FixupClassVisitor;
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 7ed7097..55fef9b 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -16,6 +16,7 @@
#include "jit_compiler.h"
+#include "art_method-inl.h"
#include "arch/instruction_set.h"
#include "arch/instruction_set_features.h"
#include "base/time_utils.h"
@@ -27,7 +28,6 @@
#include "driver/compiler_options.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
-#include "mirror/art_method-inl.h"
#include "oat_file-inl.h"
#include "object_lock.h"
#include "thread_list.h"
@@ -54,7 +54,7 @@ extern "C" void jit_unload(void* handle) {
delete reinterpret_cast<JitCompiler*>(handle);
}
-extern "C" bool jit_compile_method(void* handle, mirror::ArtMethod* method, Thread* self)
+extern "C" bool jit_compile_method(void* handle, ArtMethod* method, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
auto* jit_compiler = reinterpret_cast<JitCompiler*>(handle);
DCHECK(jit_compiler != nullptr);
@@ -105,34 +105,33 @@ JitCompiler::JitCompiler() : total_time_(0) {
JitCompiler::~JitCompiler() {
}
-bool JitCompiler::CompileMethod(Thread* self, mirror::ArtMethod* method) {
+bool JitCompiler::CompileMethod(Thread* self, ArtMethod* method) {
TimingLogger logger("JIT compiler timing logger", true, VLOG_IS_ON(jit));
const uint64_t start_time = NanoTime();
StackHandleScope<2> hs(self);
self->AssertNoPendingException();
Runtime* runtime = Runtime::Current();
- Handle<mirror::ArtMethod> h_method(hs.NewHandle(method));
if (runtime->GetJit()->GetCodeCache()->ContainsMethod(method)) {
VLOG(jit) << "Already compiled " << PrettyMethod(method);
return true; // Already compiled
}
- Handle<mirror::Class> h_class(hs.NewHandle(h_method->GetDeclaringClass()));
+ Handle<mirror::Class> h_class(hs.NewHandle(method->GetDeclaringClass()));
{
TimingLogger::ScopedTiming t2("Initializing", &logger);
if (!runtime->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) {
- VLOG(jit) << "JIT failed to initialize " << PrettyMethod(h_method.Get());
+ VLOG(jit) << "JIT failed to initialize " << PrettyMethod(method);
return false;
}
}
const DexFile* dex_file = h_class->GetDexCache()->GetDexFile();
- MethodReference method_ref(dex_file, h_method->GetDexMethodIndex());
+ MethodReference method_ref(dex_file, method->GetDexMethodIndex());
// Only verify if we don't already have verification results.
if (verification_results_->GetVerifiedMethod(method_ref) == nullptr) {
TimingLogger::ScopedTiming t2("Verifying", &logger);
std::string error;
- if (verifier::MethodVerifier::VerifyMethod(h_method.Get(), true, &error) ==
+ if (verifier::MethodVerifier::VerifyMethod(method, true, &error) ==
verifier::MethodVerifier::kHardFailure) {
- VLOG(jit) << "Not compile method " << PrettyMethod(h_method.Get())
+ VLOG(jit) << "Not compile method " << PrettyMethod(method)
<< " due to verification failure " << error;
return false;
}
@@ -140,7 +139,7 @@ bool JitCompiler::CompileMethod(Thread* self, mirror::ArtMethod* method) {
CompiledMethod* compiled_method = nullptr;
{
TimingLogger::ScopedTiming t2("Compiling", &logger);
- compiled_method = compiler_driver_->CompileMethod(self, h_method.Get());
+ compiled_method = compiler_driver_->CompileMethod(self, method);
}
{
TimingLogger::ScopedTiming t2("TrimMaps", &logger);
@@ -154,16 +153,15 @@ bool JitCompiler::CompileMethod(Thread* self, mirror::ArtMethod* method) {
// Don't add the method if we are supposed to be deoptimized.
bool result = false;
if (!runtime->GetInstrumentation()->AreAllMethodsDeoptimized()) {
- const void* code = runtime->GetClassLinker()->GetOatMethodQuickCodeFor(
- h_method.Get());
+ const void* code = runtime->GetClassLinker()->GetOatMethodQuickCodeFor(method);
if (code != nullptr) {
// Already have some compiled code, just use this instead of linking.
// TODO: Fix recompilation.
- h_method->SetEntryPointFromQuickCompiledCode(code);
+ method->SetEntryPointFromQuickCompiledCode(code);
result = true;
} else {
TimingLogger::ScopedTiming t2("MakeExecutable", &logger);
- result = MakeExecutable(compiled_method, h_method.Get());
+ result = MakeExecutable(compiled_method, method);
}
}
// Remove the compiled method to save memory.
@@ -205,7 +203,7 @@ uint8_t* JitCompiler::WriteMethodHeaderAndCode(const CompiledMethod* compiled_me
return code_ptr;
}
-bool JitCompiler::AddToCodeCache(mirror::ArtMethod* method, const CompiledMethod* compiled_method,
+bool JitCompiler::AddToCodeCache(ArtMethod* method, const CompiledMethod* compiled_method,
OatFile::OatMethod* out_method) {
Runtime* runtime = Runtime::Current();
JitCodeCache* const code_cache = runtime->GetJit()->GetCodeCache();
@@ -261,7 +259,7 @@ bool JitCompiler::AddToCodeCache(mirror::ArtMethod* method, const CompiledMethod
return true;
}
-bool JitCompiler::MakeExecutable(CompiledMethod* compiled_method, mirror::ArtMethod* method) {
+bool JitCompiler::MakeExecutable(CompiledMethod* compiled_method, ArtMethod* method) {
CHECK(method != nullptr);
CHECK(compiled_method != nullptr);
OatFile::OatMethod oat_method(nullptr, 0);
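+// Note on the dropped Handle<mirror::ArtMethod>: ArtMethod is now a native
+// structure that the GC never moves, so a raw ArtMethod* stays valid across
+// suspend points; only heap references, such as the declaring class, still need
+// handles. An illustrative sketch of the resulting pattern (not the exact commit
+// code):
+//
+//   StackHandleScope<1> hs(self);
+//   Handle<mirror::Class> h_class(hs.NewHandle(method->GetDeclaringClass()));
+//   // `method` itself is used directly below; no handle required.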
diff --git a/compiler/jit/jit_compiler.h b/compiler/jit/jit_compiler.h
index d9a5ac6..b0010e0 100644
--- a/compiler/jit/jit_compiler.h
+++ b/compiler/jit/jit_compiler.h
@@ -28,11 +28,8 @@
namespace art {
-class InstructionSetFeatures;
-
-namespace mirror {
class ArtMethod;
-}
+class InstructionSetFeatures;
namespace jit {
@@ -40,11 +37,11 @@ class JitCompiler {
public:
static JitCompiler* Create();
virtual ~JitCompiler();
- bool CompileMethod(Thread* self, mirror::ArtMethod* method)
+ bool CompileMethod(Thread* self, ArtMethod* method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// This is in the compiler since the runtime doesn't have access to the compiled method
// structures.
- bool AddToCodeCache(mirror::ArtMethod* method, const CompiledMethod* compiled_method,
+ bool AddToCodeCache(ArtMethod* method, const CompiledMethod* compiled_method,
OatFile::OatMethod* out_method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
CompilerCallbacks* GetCompilerCallbacks() const;
size_t GetTotalCompileTime() const {
@@ -65,7 +62,7 @@ class JitCompiler {
uint8_t* WriteMethodHeaderAndCode(
const CompiledMethod* compiled_method, uint8_t* reserve_begin, uint8_t* reserve_end,
const uint8_t* mapping_table, const uint8_t* vmap_table, const uint8_t* gc_map);
- bool MakeExecutable(CompiledMethod* compiled_method, mirror::ArtMethod* method)
+ bool MakeExecutable(CompiledMethod* compiled_method, ArtMethod* method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
DISALLOW_COPY_AND_ASSIGN(JitCompiler);
diff --git a/compiler/jni/jni_cfi_test_expected.inc b/compiler/jni/jni_cfi_test_expected.inc
index eaf7872..09b6034 100644
--- a/compiler/jni/jni_cfi_test_expected.inc
+++ b/compiler/jni/jni_cfi_test_expected.inc
@@ -85,8 +85,8 @@ static constexpr uint8_t expected_asm_kArm64[] = {
0xF7, 0x63, 0x08, 0xA9, 0xF9, 0x6B, 0x09, 0xA9, 0xFB, 0x73, 0x0A, 0xA9,
0xFD, 0x7B, 0x0B, 0xA9, 0xE8, 0x27, 0x02, 0x6D, 0xEA, 0x2F, 0x03, 0x6D,
0xEC, 0x37, 0x04, 0x6D, 0xEE, 0x3F, 0x05, 0x6D, 0xF5, 0x03, 0x12, 0xAA,
- 0xE0, 0x03, 0x00, 0xB9, 0xE1, 0xC7, 0x00, 0xB9, 0xE0, 0xCB, 0x00, 0xBD,
- 0xE2, 0xCF, 0x00, 0xB9, 0xE3, 0xD3, 0x00, 0xB9, 0xFF, 0x83, 0x00, 0xD1,
+ 0xE0, 0x03, 0x00, 0xF9, 0xE1, 0xCB, 0x00, 0xB9, 0xE0, 0xCF, 0x00, 0xBD,
+ 0xE2, 0xD3, 0x00, 0xB9, 0xE3, 0xD7, 0x00, 0xB9, 0xFF, 0x83, 0x00, 0xD1,
0xFF, 0x83, 0x00, 0x91, 0xF2, 0x03, 0x15, 0xAA, 0xF3, 0x53, 0x46, 0xA9,
0xF5, 0x5B, 0x47, 0xA9, 0xF7, 0x63, 0x48, 0xA9, 0xF9, 0x6B, 0x49, 0xA9,
0xFB, 0x73, 0x4A, 0xA9, 0xFD, 0x7B, 0x4B, 0xA9, 0xE8, 0x27, 0x42, 0x6D,
@@ -138,11 +138,11 @@ static constexpr uint8_t expected_cfi_kArm64[] = {
// 0x0000002c: .cfi_offset_extended: r78 at cfa-112
// 0x0000002c: .cfi_offset_extended: r79 at cfa-104
// 0x0000002c: mov x21, tr
-// 0x00000030: str w0, [sp]
-// 0x00000034: str w1, [sp, #196]
-// 0x00000038: str s0, [sp, #200]
-// 0x0000003c: str w2, [sp, #204]
-// 0x00000040: str w3, [sp, #208]
+// 0x00000030: str x0, [sp]
+// 0x00000034: str w1, [sp, #200]
+// 0x00000038: str s0, [sp, #204]
+// 0x0000003c: str w2, [sp, #208]
+// 0x00000040: str w3, [sp, #212]
// 0x00000044: sub sp, sp, #0x20 (32)
// 0x00000048: .cfi_def_cfa_offset: 224
// 0x00000048: add sp, sp, #0x20 (32)
@@ -238,20 +238,20 @@ static constexpr uint8_t expected_asm_kX86_64[] = {
0x41, 0x57, 0x41, 0x56, 0x41, 0x55, 0x41, 0x54, 0x55, 0x53, 0x48, 0x83,
0xEC, 0x48, 0xF2, 0x44, 0x0F, 0x11, 0x7C, 0x24, 0x40, 0xF2, 0x44, 0x0F,
0x11, 0x74, 0x24, 0x38, 0xF2, 0x44, 0x0F, 0x11, 0x6C, 0x24, 0x30, 0xF2,
- 0x44, 0x0F, 0x11, 0x64, 0x24, 0x28, 0x89, 0x3C, 0x24, 0x89, 0xB4, 0x24,
- 0x84, 0x00, 0x00, 0x00, 0xF3, 0x0F, 0x11, 0x84, 0x24, 0x88, 0x00, 0x00,
- 0x00, 0x89, 0x94, 0x24, 0x8C, 0x00, 0x00, 0x00, 0x89, 0x8C, 0x24, 0x90,
- 0x00, 0x00, 0x00, 0x48, 0x83, 0xC4, 0xE0, 0x48, 0x83, 0xC4, 0x20, 0xF2,
- 0x44, 0x0F, 0x10, 0x64, 0x24, 0x28, 0xF2, 0x44, 0x0F, 0x10, 0x6C, 0x24,
- 0x30, 0xF2, 0x44, 0x0F, 0x10, 0x74, 0x24, 0x38, 0xF2, 0x44, 0x0F, 0x10,
- 0x7C, 0x24, 0x40, 0x48, 0x83, 0xC4, 0x48, 0x5B, 0x5D, 0x41, 0x5C, 0x41,
- 0x5D, 0x41, 0x5E, 0x41, 0x5F, 0xC3,
+ 0x44, 0x0F, 0x11, 0x64, 0x24, 0x28, 0x48, 0x89, 0x3C, 0x24, 0x89, 0xB4,
+ 0x24, 0x88, 0x00, 0x00, 0x00, 0xF3, 0x0F, 0x11, 0x84, 0x24, 0x8C, 0x00,
+ 0x00, 0x00, 0x89, 0x94, 0x24, 0x90, 0x00, 0x00, 0x00, 0x89, 0x8C, 0x24,
+ 0x94, 0x00, 0x00, 0x00, 0x48, 0x83, 0xC4, 0xE0, 0x48, 0x83, 0xC4, 0x20,
+ 0xF2, 0x44, 0x0F, 0x10, 0x64, 0x24, 0x28, 0xF2, 0x44, 0x0F, 0x10, 0x6C,
+ 0x24, 0x30, 0xF2, 0x44, 0x0F, 0x10, 0x74, 0x24, 0x38, 0xF2, 0x44, 0x0F,
+ 0x10, 0x7C, 0x24, 0x40, 0x48, 0x83, 0xC4, 0x48, 0x5B, 0x5D, 0x41, 0x5C,
+ 0x41, 0x5D, 0x41, 0x5E, 0x41, 0x5F, 0xC3,
};
static constexpr uint8_t expected_cfi_kX86_64[] = {
0x42, 0x0E, 0x10, 0x8F, 0x04, 0x42, 0x0E, 0x18, 0x8E, 0x06, 0x42, 0x0E,
0x20, 0x8D, 0x08, 0x42, 0x0E, 0x28, 0x8C, 0x0A, 0x41, 0x0E, 0x30, 0x86,
0x0C, 0x41, 0x0E, 0x38, 0x83, 0x0E, 0x44, 0x0E, 0x80, 0x01, 0x47, 0xA0,
- 0x10, 0x47, 0x9F, 0x12, 0x47, 0x9E, 0x14, 0x47, 0x9D, 0x16, 0x65, 0x0E,
+ 0x10, 0x47, 0x9F, 0x12, 0x47, 0x9E, 0x14, 0x47, 0x9D, 0x16, 0x66, 0x0E,
0xA0, 0x01, 0x44, 0x0E, 0x80, 0x01, 0x0A, 0x47, 0xDD, 0x47, 0xDE, 0x47,
0xDF, 0x47, 0xE0, 0x44, 0x0E, 0x38, 0x41, 0x0E, 0x30, 0xC3, 0x41, 0x0E,
0x28, 0xC6, 0x42, 0x0E, 0x20, 0xCC, 0x42, 0x0E, 0x18, 0xCD, 0x42, 0x0E,
@@ -285,47 +285,47 @@ static constexpr uint8_t expected_cfi_kX86_64[] = {
// 0x00000023: .cfi_offset: r30 at cfa-80
// 0x00000023: movsd [rsp + 40], xmm12
// 0x0000002a: .cfi_offset: r29 at cfa-88
-// 0x0000002a: mov [rsp], edi
-// 0x0000002d: mov [rsp + 132], esi
-// 0x00000034: movss [rsp + 136], xmm0
-// 0x0000003d: mov [rsp + 140], edx
-// 0x00000044: mov [rsp + 144], ecx
-// 0x0000004b: addq rsp, -32
-// 0x0000004f: .cfi_def_cfa_offset: 160
-// 0x0000004f: addq rsp, 32
-// 0x00000053: .cfi_def_cfa_offset: 128
-// 0x00000053: .cfi_remember_state
-// 0x00000053: movsd xmm12, [rsp + 40]
-// 0x0000005a: .cfi_restore: r29
-// 0x0000005a: movsd xmm13, [rsp + 48]
-// 0x00000061: .cfi_restore: r30
-// 0x00000061: movsd xmm14, [rsp + 56]
-// 0x00000068: .cfi_restore: r31
-// 0x00000068: movsd xmm15, [rsp + 64]
-// 0x0000006f: .cfi_restore: r32
-// 0x0000006f: addq rsp, 72
-// 0x00000073: .cfi_def_cfa_offset: 56
-// 0x00000073: pop rbx
-// 0x00000074: .cfi_def_cfa_offset: 48
-// 0x00000074: .cfi_restore: r3
-// 0x00000074: pop rbp
-// 0x00000075: .cfi_def_cfa_offset: 40
-// 0x00000075: .cfi_restore: r6
-// 0x00000075: pop r12
-// 0x00000077: .cfi_def_cfa_offset: 32
-// 0x00000077: .cfi_restore: r12
-// 0x00000077: pop r13
-// 0x00000079: .cfi_def_cfa_offset: 24
-// 0x00000079: .cfi_restore: r13
-// 0x00000079: pop r14
-// 0x0000007b: .cfi_def_cfa_offset: 16
-// 0x0000007b: .cfi_restore: r14
-// 0x0000007b: pop r15
-// 0x0000007d: .cfi_def_cfa_offset: 8
-// 0x0000007d: .cfi_restore: r15
-// 0x0000007d: ret
-// 0x0000007e: .cfi_restore_state
-// 0x0000007e: .cfi_def_cfa_offset: 128
+// 0x0000002a: movq [rsp], rdi
+// 0x0000002e: mov [rsp + 136], esi
+// 0x00000035: movss [rsp + 140], xmm0
+// 0x0000003e: mov [rsp + 144], edx
+// 0x00000045: mov [rsp + 148], ecx
+// 0x0000004c: addq rsp, -32
+// 0x00000050: .cfi_def_cfa_offset: 160
+// 0x00000050: addq rsp, 32
+// 0x00000054: .cfi_def_cfa_offset: 128
+// 0x00000054: .cfi_remember_state
+// 0x00000054: movsd xmm12, [rsp + 40]
+// 0x0000005b: .cfi_restore: r29
+// 0x0000005b: movsd xmm13, [rsp + 48]
+// 0x00000062: .cfi_restore: r30
+// 0x00000062: movsd xmm14, [rsp + 56]
+// 0x00000069: .cfi_restore: r31
+// 0x00000069: movsd xmm15, [rsp + 64]
+// 0x00000070: .cfi_restore: r32
+// 0x00000070: addq rsp, 72
+// 0x00000074: .cfi_def_cfa_offset: 56
+// 0x00000074: pop rbx
+// 0x00000075: .cfi_def_cfa_offset: 48
+// 0x00000075: .cfi_restore: r3
+// 0x00000075: pop rbp
+// 0x00000076: .cfi_def_cfa_offset: 40
+// 0x00000076: .cfi_restore: r6
+// 0x00000076: pop r12
+// 0x00000078: .cfi_def_cfa_offset: 32
+// 0x00000078: .cfi_restore: r12
+// 0x00000078: pop r13
+// 0x0000007a: .cfi_def_cfa_offset: 24
+// 0x0000007a: .cfi_restore: r13
+// 0x0000007a: pop r14
+// 0x0000007c: .cfi_def_cfa_offset: 16
+// 0x0000007c: .cfi_restore: r14
+// 0x0000007c: pop r15
+// 0x0000007e: .cfi_def_cfa_offset: 8
+// 0x0000007e: .cfi_restore: r15
+// 0x0000007e: ret
+// 0x0000007f: .cfi_restore_state
+// 0x0000007f: .cfi_def_cfa_offset: 128
static constexpr uint8_t expected_asm_kMips[] = {
0xC0, 0xFF, 0xBD, 0x27, 0x3C, 0x00, 0xBF, 0xAF, 0x38, 0x00, 0xB8, 0xAF,
@@ -400,7 +400,7 @@ static constexpr uint8_t expected_cfi_kMips[] = {
// 0x0000006c: .cfi_restore: r31
// 0x0000006c: addiu r29, r29, 64
// 0x00000070: .cfi_def_cfa_offset: 0
-// 0x00000070: jalr r0, r31
+// 0x00000070: jr r31
// 0x00000074: nop
// 0x00000078: .cfi_restore_state
// 0x00000078: .cfi_def_cfa_offset: 64
@@ -409,8 +409,8 @@ static constexpr uint8_t expected_asm_kMips64[] = {
0xA0, 0xFF, 0xBD, 0x67, 0x58, 0x00, 0xBF, 0xFF, 0x50, 0x00, 0xBE, 0xFF,
0x48, 0x00, 0xBC, 0xFF, 0x40, 0x00, 0xB7, 0xFF, 0x38, 0x00, 0xB6, 0xFF,
0x30, 0x00, 0xB5, 0xFF, 0x28, 0x00, 0xB4, 0xFF, 0x20, 0x00, 0xB3, 0xFF,
- 0x18, 0x00, 0xB2, 0xFF, 0x00, 0x00, 0xA4, 0xAF, 0x64, 0x00, 0xA5, 0xAF,
- 0x68, 0x00, 0xAE, 0xE7, 0x6C, 0x00, 0xA7, 0xAF, 0x70, 0x00, 0xA8, 0xAF,
+ 0x18, 0x00, 0xB2, 0xFF, 0x00, 0x00, 0xA4, 0xFF, 0x68, 0x00, 0xA5, 0xAF,
+ 0x6C, 0x00, 0xAE, 0xE7, 0x70, 0x00, 0xA7, 0xAF, 0x74, 0x00, 0xA8, 0xAF,
0xE0, 0xFF, 0xBD, 0x67, 0x20, 0x00, 0xBD, 0x67, 0x18, 0x00, 0xB2, 0xDF,
0x20, 0x00, 0xB3, 0xDF, 0x28, 0x00, 0xB4, 0xDF, 0x30, 0x00, 0xB5, 0xDF,
0x38, 0x00, 0xB6, 0xDF, 0x40, 0x00, 0xB7, 0xDF, 0x48, 0x00, 0xBC, 0xDF,
@@ -445,11 +445,11 @@ static constexpr uint8_t expected_cfi_kMips64[] = {
// 0x00000024: .cfi_offset: r19 at cfa-64
// 0x00000024: sd r18, +24(r29)
// 0x00000028: .cfi_offset: r18 at cfa-72
-// 0x00000028: sw r4, +0(r29)
-// 0x0000002c: sw r5, +100(r29)
-// 0x00000030: swc1 f14, +104(r29)
-// 0x00000034: sw r7, +108(r29)
-// 0x00000038: sw r8, +112(r29)
+// 0x00000028: sd r4, +0(r29)
+// 0x0000002c: sw r5, +104(r29)
+// 0x00000030: swc1 f14, +108(r29)
+// 0x00000034: sw r7, +112(r29)
+// 0x00000038: sw r8, +116(r29)
// 0x0000003c: daddiu r29, r29, -32
// 0x00000040: .cfi_def_cfa_offset: 128
// 0x00000040: daddiu r29, r29, 32
@@ -479,4 +479,3 @@ static constexpr uint8_t expected_cfi_kMips64[] = {
// 0x00000070: nop
// 0x00000074: .cfi_restore_state
// 0x00000074: .cfi_def_cfa_offset: 96
-
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index 4186891..e98e572 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -18,6 +18,7 @@
#include <math.h>
+#include "art_method-inl.h"
#include "class_linker.h"
#include "common_compiler_test.h"
#include "dex_file.h"
@@ -25,7 +26,6 @@
#include "indirect_reference_table.h"
#include "jni_internal.h"
#include "mem_map.h"
-#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/object_array-inl.h"
@@ -65,12 +65,9 @@ class JniCompilerTest : public CommonCompilerTest {
hs.NewHandle(soa.Decode<mirror::ClassLoader*>(class_loader)));
// Compile the native method before starting the runtime
mirror::Class* c = class_linker_->FindClass(soa.Self(), "LMyClassNatives;", loader);
- mirror::ArtMethod* method;
- if (direct) {
- method = c->FindDirectMethod(method_name, method_sig);
- } else {
- method = c->FindVirtualMethod(method_name, method_sig);
- }
+ const auto pointer_size = class_linker_->GetImagePointerSize();
+ ArtMethod* method = direct ? c->FindDirectMethod(method_name, method_sig, pointer_size) :
+ c->FindVirtualMethod(method_name, method_sig, pointer_size);
ASSERT_TRUE(method != nullptr) << method_name << " " << method_sig;
if (check_generic_jni_) {
method->SetEntryPointFromQuickCompiledCode(class_linker_->GetRuntimeQuickGenericJniStub());
diff --git a/compiler/jni/quick/arm/calling_convention_arm.cc b/compiler/jni/quick/arm/calling_convention_arm.cc
index d3690b2..9d2732a 100644
--- a/compiler/jni/quick/arm/calling_convention_arm.cc
+++ b/compiler/jni/quick/arm/calling_convention_arm.cc
@@ -257,8 +257,7 @@ ManagedRegister ArmJniCallingConvention::ReturnScratchRegister() const {
size_t ArmJniCallingConvention::FrameSize() {
// Method*, LR and callee save area size, local reference segment state
- size_t frame_data_size = sizeof(StackReference<mirror::ArtMethod>) +
- (2 + CalleeSaveRegisters().size()) * kFramePointerSize;
+ size_t frame_data_size = kArmPointerSize + (2 + CalleeSaveRegisters().size()) * kFramePointerSize;
// References plus 2 words for HandleScope header
size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount());
// Plus return value spill area size
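+  // Worked example (hypothetical register set): with 4-byte kArmPointerSize and
+  // kFramePointerSize and 5 callee-save core registers, frame_data_size is
+  // 4 + (2 + 5) * 4 == 32 bytes, covering the ArtMethod* slot, LR, the local
+  // reference segment state word, and the callee saves, before the HandleScope
+  // and return-value spill area are added.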
diff --git a/compiler/jni/quick/arm64/calling_convention_arm64.cc b/compiler/jni/quick/arm64/calling_convention_arm64.cc
index 4344c90..b094747 100644
--- a/compiler/jni/quick/arm64/calling_convention_arm64.cc
+++ b/compiler/jni/quick/arm64/calling_convention_arm64.cc
@@ -99,8 +99,8 @@ ManagedRegister Arm64ManagedRuntimeCallingConvention::CurrentParamRegister() {
FrameOffset Arm64ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
CHECK(IsCurrentParamOnStack());
FrameOffset result =
- FrameOffset(displacement_.Int32Value() + // displacement
- sizeof(StackReference<mirror::ArtMethod>) + // Method ref
+ FrameOffset(displacement_.Int32Value() + // displacement
+ kFramePointerSize + // Method ref
(itr_slots_ * sizeof(uint32_t))); // offset into in args
return result;
}
@@ -206,7 +206,7 @@ ManagedRegister Arm64JniCallingConvention::ReturnScratchRegister() const {
size_t Arm64JniCallingConvention::FrameSize() {
// Method*, callee save area size, local reference segment state
- size_t frame_data_size = sizeof(StackReference<mirror::ArtMethod>) +
+ size_t frame_data_size = kFramePointerSize +
CalleeSaveRegisters().size() * kFramePointerSize + sizeof(uint32_t);
// References plus 2 words for HandleScope header
size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount());
diff --git a/compiler/jni/quick/calling_convention.cc b/compiler/jni/quick/calling_convention.cc
index 2e146c4..bb8136b 100644
--- a/compiler/jni/quick/calling_convention.cc
+++ b/compiler/jni/quick/calling_convention.cc
@@ -131,7 +131,7 @@ size_t JniCallingConvention::ReferenceCount() const {
FrameOffset JniCallingConvention::SavedLocalReferenceCookieOffset() const {
size_t references_size = handle_scope_pointer_size_ * ReferenceCount(); // size excluding header
- return FrameOffset(HandleerencesOffset().Int32Value() + references_size);
+ return FrameOffset(HandleReferencesOffset().Int32Value() + references_size);
}
FrameOffset JniCallingConvention::ReturnValueSaveLocation() const {
@@ -228,7 +228,7 @@ bool JniCallingConvention::IsCurrentParamALong() {
FrameOffset JniCallingConvention::CurrentParamHandleScopeEntryOffset() {
CHECK(IsCurrentParamAReference());
CHECK_LT(HandleScopeLinkOffset(), HandleScopeNumRefsOffset());
- int result = HandleerencesOffset().Int32Value() + itr_refs_ * handle_scope_pointer_size_;
+ int result = HandleReferencesOffset().Int32Value() + itr_refs_ * handle_scope_pointer_size_;
CHECK_GT(result, HandleScopeNumRefsOffset().Int32Value());
return FrameOffset(result);
}
diff --git a/compiler/jni/quick/calling_convention.h b/compiler/jni/quick/calling_convention.h
index 0c64a36..c9b595a 100644
--- a/compiler/jni/quick/calling_convention.h
+++ b/compiler/jni/quick/calling_convention.h
@@ -171,7 +171,7 @@ class CallingConvention {
if (IsStatic()) {
param++; // 0th argument must skip return value at start of the shorty
} else if (param == 0) {
- return frame_pointer_size_; // this argument
+ return sizeof(mirror::HeapReference<mirror::Object>); // this argument
}
size_t result = Primitive::ComponentSize(Primitive::GetType(shorty_[param]));
if (result >= 1 && result < 4) {
@@ -196,7 +196,7 @@ class CallingConvention {
unsigned int itr_float_and_doubles_;
// Space for frames below this on the stack.
FrameOffset displacement_;
- // The size of a reference.
+ // The size of a pointer.
const size_t frame_pointer_size_;
// The size of a reference entry within the handle scope.
const size_t handle_scope_pointer_size_;
@@ -320,12 +320,13 @@ class JniCallingConvention : public CallingConvention {
// Position of handle scope and interior fields
FrameOffset HandleScopeOffset() const {
- return FrameOffset(this->displacement_.Int32Value() + sizeof(StackReference<mirror::ArtMethod>));
+ return FrameOffset(this->displacement_.Int32Value() + frame_pointer_size_);
// above Method reference
}
FrameOffset HandleScopeLinkOffset() const {
- return FrameOffset(HandleScopeOffset().Int32Value() + HandleScope::LinkOffset(frame_pointer_size_));
+ return FrameOffset(HandleScopeOffset().Int32Value() +
+ HandleScope::LinkOffset(frame_pointer_size_));
}
FrameOffset HandleScopeNumRefsOffset() const {
@@ -333,7 +334,7 @@ class JniCallingConvention : public CallingConvention {
HandleScope::NumberOfReferencesOffset(frame_pointer_size_));
}
- FrameOffset HandleerencesOffset() const {
+ FrameOffset HandleReferencesOffset() const {
return FrameOffset(HandleScopeOffset().Int32Value() +
HandleScope::ReferencesOffset(frame_pointer_size_));
}
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index a06303d..0347c5e 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -21,6 +21,7 @@
#include <vector>
#include <fstream>
+#include "art_method.h"
#include "base/logging.h"
#include "base/macros.h"
#include "calling_convention.h"
@@ -31,7 +32,6 @@
#include "driver/compiler_options.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "jni_env_ext.h"
-#include "mirror/art_method.h"
#include "utils/assembler.h"
#include "utils/managed_register.h"
#include "utils/arm/managed_register_arm.h"
@@ -117,18 +117,18 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
if (is_64_bit_target) {
__ CopyRawPtrFromThread64(main_jni_conv->HandleScopeLinkOffset(),
- Thread::TopHandleScopeOffset<8>(),
- mr_conv->InterproceduralScratchRegister());
+ Thread::TopHandleScopeOffset<8>(),
+ mr_conv->InterproceduralScratchRegister());
__ StoreStackOffsetToThread64(Thread::TopHandleScopeOffset<8>(),
- main_jni_conv->HandleScopeOffset(),
- mr_conv->InterproceduralScratchRegister());
+ main_jni_conv->HandleScopeOffset(),
+ mr_conv->InterproceduralScratchRegister());
} else {
__ CopyRawPtrFromThread32(main_jni_conv->HandleScopeLinkOffset(),
- Thread::TopHandleScopeOffset<4>(),
- mr_conv->InterproceduralScratchRegister());
+ Thread::TopHandleScopeOffset<4>(),
+ mr_conv->InterproceduralScratchRegister());
__ StoreStackOffsetToThread32(Thread::TopHandleScopeOffset<4>(),
- main_jni_conv->HandleScopeOffset(),
- mr_conv->InterproceduralScratchRegister());
+ main_jni_conv->HandleScopeOffset(),
+ mr_conv->InterproceduralScratchRegister());
}
// 3. Place incoming reference arguments into handle scope
@@ -138,10 +138,10 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
FrameOffset handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset();
// Check handle scope offset is within frame
CHECK_LT(handle_scope_offset.Uint32Value(), frame_size);
- // Note this LoadRef() already includes the heap poisoning negation.
+ // Note this LoadRef() doesn't need heap poisoning since it's from the ArtMethod.
// Note this LoadRef() does not include read barrier. It will be handled below.
__ LoadRef(main_jni_conv->InterproceduralScratchRegister(),
- mr_conv->MethodRegister(), mirror::ArtMethod::DeclaringClassOffset());
+ mr_conv->MethodRegister(), ArtMethod::DeclaringClassOffset(), false);
__ VerifyObject(main_jni_conv->InterproceduralScratchRegister(), false);
__ StoreRef(handle_scope_offset, main_jni_conv->InterproceduralScratchRegister());
main_jni_conv->Next(); // in handle scope so move to next argument
@@ -251,12 +251,11 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
if (main_jni_conv->IsCurrentParamOnStack()) {
FrameOffset out_off = main_jni_conv->CurrentParamStackOffset();
__ CreateHandleScopeEntry(out_off, locked_object_handle_scope_offset,
- mr_conv->InterproceduralScratchRegister(),
- false);
+ mr_conv->InterproceduralScratchRegister(), false);
} else {
ManagedRegister out_reg = main_jni_conv->CurrentParamRegister();
__ CreateHandleScopeEntry(out_reg, locked_object_handle_scope_offset,
- ManagedRegister::NoRegister(), false);
+ ManagedRegister::NoRegister(), false);
}
main_jni_conv->Next();
}
@@ -264,10 +263,10 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
__ GetCurrentThread(main_jni_conv->CurrentParamRegister());
if (is_64_bit_target) {
__ Call(main_jni_conv->CurrentParamRegister(), Offset(jni_start64),
- main_jni_conv->InterproceduralScratchRegister());
+ main_jni_conv->InterproceduralScratchRegister());
} else {
__ Call(main_jni_conv->CurrentParamRegister(), Offset(jni_start32),
- main_jni_conv->InterproceduralScratchRegister());
+ main_jni_conv->InterproceduralScratchRegister());
}
} else {
__ GetCurrentThread(main_jni_conv->CurrentParamStackOffset(),
@@ -347,15 +346,15 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
FrameOffset jni_env = main_jni_conv->CurrentParamStackOffset();
if (is_64_bit_target) {
__ CopyRawPtrFromThread64(jni_env, Thread::JniEnvOffset<8>(),
- main_jni_conv->InterproceduralScratchRegister());
+ main_jni_conv->InterproceduralScratchRegister());
} else {
__ CopyRawPtrFromThread32(jni_env, Thread::JniEnvOffset<4>(),
- main_jni_conv->InterproceduralScratchRegister());
+ main_jni_conv->InterproceduralScratchRegister());
}
}
// 9. Plant call to native code associated with method.
- MemberOffset jni_entrypoint_offset = mirror::ArtMethod::EntryPointFromJniOffset(
+ MemberOffset jni_entrypoint_offset = ArtMethod::EntryPointFromJniOffset(
InstructionSetPointerSize(instruction_set));
__ Call(main_jni_conv->MethodStackOffset(), jni_entrypoint_offset,
mr_conv->InterproceduralScratchRegister());
diff --git a/compiler/jni/quick/mips/calling_convention_mips.cc b/compiler/jni/quick/mips/calling_convention_mips.cc
index aefbf06..4e716b5 100644
--- a/compiler/jni/quick/mips/calling_convention_mips.cc
+++ b/compiler/jni/quick/mips/calling_convention_mips.cc
@@ -148,7 +148,7 @@ ManagedRegister MipsJniCallingConvention::ReturnScratchRegister() const {
size_t MipsJniCallingConvention::FrameSize() {
// Method*, LR and callee save area size, local reference segment state
- size_t frame_data_size = sizeof(StackReference<mirror::ArtMethod>) +
+ size_t frame_data_size = kMipsPointerSize +
(2 + CalleeSaveRegisters().size()) * kFramePointerSize;
// References plus 2 words for HandleScope header
size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount());
diff --git a/compiler/jni/quick/mips64/calling_convention_mips64.cc b/compiler/jni/quick/mips64/calling_convention_mips64.cc
index d446867..3a11bcf 100644
--- a/compiler/jni/quick/mips64/calling_convention_mips64.cc
+++ b/compiler/jni/quick/mips64/calling_convention_mips64.cc
@@ -84,9 +84,9 @@ ManagedRegister Mips64ManagedRuntimeCallingConvention::CurrentParamRegister() {
FrameOffset Mips64ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
CHECK(IsCurrentParamOnStack());
FrameOffset result =
- FrameOffset(displacement_.Int32Value() + // displacement
- sizeof(StackReference<mirror::ArtMethod>) + // Method ref
- (itr_slots_ * sizeof(uint32_t))); // offset into in args
+ FrameOffset(displacement_.Int32Value() + // displacement
+ kFramePointerSize + // Method ref
+ (itr_slots_ * sizeof(uint32_t))); // offset into in args
return result;
}
@@ -149,7 +149,7 @@ ManagedRegister Mips64JniCallingConvention::ReturnScratchRegister() const {
size_t Mips64JniCallingConvention::FrameSize() {
// Method* and callee save area size, local reference segment state
- size_t frame_data_size = sizeof(StackReference<mirror::ArtMethod>) +
+ size_t frame_data_size = kFramePointerSize +
CalleeSaveRegisters().size() * kFramePointerSize + sizeof(uint32_t);
// References plus 2 words for HandleScope header
size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount());
diff --git a/compiler/jni/quick/x86/calling_convention_x86.cc b/compiler/jni/quick/x86/calling_convention_x86.cc
index 499dd7c..322caca 100644
--- a/compiler/jni/quick/x86/calling_convention_x86.cc
+++ b/compiler/jni/quick/x86/calling_convention_x86.cc
@@ -180,7 +180,7 @@ uint32_t X86JniCallingConvention::CoreSpillMask() const {
size_t X86JniCallingConvention::FrameSize() {
// Method*, return address and callee save area size, local reference segment state
- size_t frame_data_size = sizeof(StackReference<mirror::ArtMethod>) +
+ size_t frame_data_size = kX86PointerSize +
(2 + CalleeSaveRegisters().size()) * kFramePointerSize;
// References plus 2 words for HandleScope header
size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount());
diff --git a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
index 7e92d12..9c7eab1 100644
--- a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
+++ b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
@@ -97,9 +97,9 @@ ManagedRegister X86_64ManagedRuntimeCallingConvention::CurrentParamRegister() {
}
FrameOffset X86_64ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
- return FrameOffset(displacement_.Int32Value() + // displacement
- sizeof(StackReference<mirror::ArtMethod>) + // Method ref
- (itr_slots_ * sizeof(uint32_t))); // offset into in args
+ return FrameOffset(displacement_.Int32Value() + // displacement
+ kX86_64PointerSize + // Method ref
+ itr_slots_ * sizeof(uint32_t)); // offset into in args
}
const ManagedRegisterEntrySpills& X86_64ManagedRuntimeCallingConvention::EntrySpills() {
@@ -149,7 +149,7 @@ uint32_t X86_64JniCallingConvention::FpSpillMask() const {
size_t X86_64JniCallingConvention::FrameSize() {
// Method*, return address and callee save area size, local reference segment state
- size_t frame_data_size = sizeof(StackReference<mirror::ArtMethod>) +
+ size_t frame_data_size = kX86_64PointerSize +
(2 + CalleeSaveRegisters().size()) * kFramePointerSize;
// References plus link_ (pointer) and number_of_references_ (uint32_t) for HandleScope header
size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount());
diff --git a/compiler/linker/arm/relative_patcher_thumb2.cc b/compiler/linker/arm/relative_patcher_thumb2.cc
index b17cbca..d010430 100644
--- a/compiler/linker/arm/relative_patcher_thumb2.cc
+++ b/compiler/linker/arm/relative_patcher_thumb2.cc
@@ -16,8 +16,8 @@
#include "linker/arm/relative_patcher_thumb2.h"
+#include "art_method.h"
#include "compiled_method.h"
-#include "mirror/art_method.h"
#include "utils/arm/assembler_thumb2.h"
namespace art {
@@ -80,7 +80,7 @@ std::vector<uint8_t> Thumb2RelativePatcher::CompileThunkCode() {
arm::Thumb2Assembler assembler;
assembler.LoadFromOffset(
arm::kLoadWord, arm::PC, arm::R0,
- mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmPointerSize).Int32Value());
+ ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmPointerSize).Int32Value());
assembler.bkpt(0);
std::vector<uint8_t> thunk_code(assembler.CodeSize());
MemoryRegion code(thunk_code.data(), thunk_code.size());
diff --git a/compiler/linker/arm64/relative_patcher_arm64.cc b/compiler/linker/arm64/relative_patcher_arm64.cc
index 72ddf07..ee48789 100644
--- a/compiler/linker/arm64/relative_patcher_arm64.cc
+++ b/compiler/linker/arm64/relative_patcher_arm64.cc
@@ -17,9 +17,9 @@
#include "linker/arm64/relative_patcher_arm64.h"
#include "arch/arm64/instruction_set_features_arm64.h"
+#include "art_method.h"
#include "compiled_method.h"
#include "driver/compiler_driver.h"
-#include "mirror/art_method.h"
#include "utils/arm64/assembler_arm64.h"
#include "oat.h"
#include "output_stream.h"
@@ -158,6 +158,8 @@ void Arm64RelativePatcher::PatchDexCacheReference(std::vector<uint8_t>* code,
uint32_t insn = GetInsn(code, literal_offset);
uint32_t pc_insn_offset = patch.PcInsnOffset();
uint32_t disp = target_offset - ((patch_offset - literal_offset + pc_insn_offset) & ~0xfffu);
+ bool wide = (insn & 0x40000000) != 0;
+ uint32_t shift = wide ? 3u : 2u;
if (literal_offset == pc_insn_offset) {
// Check it's an ADRP with imm == 0 (unset).
DCHECK_EQ((insn & 0xffffffe0u), 0x90000000u)
@@ -173,7 +175,7 @@ void Arm64RelativePatcher::PatchDexCacheReference(std::vector<uint8_t>* code,
uint32_t out_disp = thunk_offset - patch_offset;
DCHECK_EQ(out_disp & 3u, 0u);
DCHECK((out_disp >> 27) == 0u || (out_disp >> 27) == 31u); // 28-bit signed.
- insn = (out_disp & 0x0fffffffu) >> 2;
+ insn = (out_disp & 0x0fffffffu) >> shift;
insn |= 0x14000000; // B <thunk>
uint32_t back_disp = -out_disp;
@@ -194,7 +196,8 @@ void Arm64RelativePatcher::PatchDexCacheReference(std::vector<uint8_t>* code,
// Write the new ADRP (or B to the erratum 843419 thunk).
SetInsn(code, literal_offset, insn);
} else {
- DCHECK_EQ(insn & 0xfffffc00, 0xb9400000); // LDR 32-bit with imm12 == 0 (unset).
+ // LDR 32-bit or 64-bit with imm12 == 0 (unset).
+ DCHECK_EQ(insn & 0xbffffc00, 0xb9400000) << insn;
if (kIsDebugBuild) {
uint32_t adrp = GetInsn(code, pc_insn_offset);
if ((adrp & 0x9f000000u) != 0x90000000u) {
@@ -216,7 +219,7 @@ void Arm64RelativePatcher::PatchDexCacheReference(std::vector<uint8_t>* code,
CHECK_EQ(adrp & 0x9f00001fu, // Check that pc_insn_offset points
0x90000000 | ((insn >> 5) & 0x1fu)); // to ADRP with matching register.
}
- uint32_t imm12 = (disp & 0xfffu) >> 2;
+ uint32_t imm12 = (disp & 0xfffu) >> shift;
insn = (insn & ~(0xfffu << 10)) | (imm12 << 10);
SetInsn(code, literal_offset, insn);
}
@@ -226,7 +229,7 @@ std::vector<uint8_t> Arm64RelativePatcher::CompileThunkCode() {
// The thunk just uses the entry point in the ArtMethod. This works even for calls
// to the generic JNI and interpreter trampolines.
arm64::Arm64Assembler assembler;
- Offset offset(mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ Offset offset(ArtMethod::EntryPointFromQuickCompiledCodeOffset(
kArm64PointerSize).Int32Value());
assembler.JumpTo(ManagedRegister(arm64::X0), offset, ManagedRegister(arm64::IP0));
// Ensure we emit the literal pool.
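The wide/shift logic added to PatchDexCacheReference encodes a fact about the A64 ISA: LDR (immediate, unsigned offset) stores imm12 scaled by the access size, and bit 30 of the encoding selects a 32-bit (Wt) versus 64-bit (Xt) load. Since arm64 now loads method pointers from the dex cache with 64-bit LDRs, the patcher must shift the displacement by 3 instead of 2. A standalone sketch of the imm12 patch:

#include <cstdint>

// A64 LDR (immediate, unsigned offset) scales imm12 by the access size.
// Bit 30 distinguishes LDR Wt (32-bit, shift by 2) from LDR Xt (64-bit,
// shift by 3); imm12 occupies bits 10..21 of the encoding.
uint32_t PatchLdrImm12(uint32_t insn, uint32_t disp) {
  const bool wide = (insn & 0x40000000u) != 0;
  const uint32_t shift = wide ? 3u : 2u;
  const uint32_t imm12 = (disp & 0xfffu) >> shift;
  return (insn & ~(0xfffu << 10)) | (imm12 << 10);
}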
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index a871a82..0747756 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -15,6 +15,7 @@
*/
#include "arch/instruction_set_features.h"
+#include "art_method-inl.h"
#include "class_linker.h"
#include "common_compiler_test.h"
#include "compiled_method.h"
@@ -26,7 +27,6 @@
#include "driver/compiler_driver.h"
#include "driver/compiler_options.h"
#include "entrypoints/quick/quick_entrypoints.h"
-#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
@@ -41,7 +41,7 @@ class OatTest : public CommonCompilerTest {
protected:
static const bool kCompile = false; // DISABLED_ due to the time to compile libcore
- void CheckMethod(mirror::ArtMethod* method,
+ void CheckMethod(ArtMethod* method,
const OatFile::OatMethod& oat_method,
const DexFile& dex_file)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -140,16 +140,18 @@ TEST_F(OatTest, WriteRead) {
ASSERT_TRUE(oat_dex_file != nullptr);
CHECK_EQ(dex_file.GetLocationChecksum(), oat_dex_file->GetDexFileLocationChecksum());
ScopedObjectAccess soa(Thread::Current());
+ auto pointer_size = class_linker->GetImagePointerSize();
for (size_t i = 0; i < dex_file.NumClassDefs(); i++) {
const DexFile::ClassDef& class_def = dex_file.GetClassDef(i);
const uint8_t* class_data = dex_file.GetClassData(class_def);
+
size_t num_virtual_methods = 0;
if (class_data != nullptr) {
ClassDataItemIterator it(dex_file, class_data);
num_virtual_methods = it.NumVirtualMethods();
}
+
const char* descriptor = dex_file.GetClassDescriptor(class_def);
- StackHandleScope<1> hs(soa.Self());
mirror::Class* klass = class_linker->FindClass(soa.Self(), descriptor,
NullHandle<mirror::ClassLoader>());
@@ -159,14 +161,19 @@ TEST_F(OatTest, WriteRead) {
oat_class.GetType()) << descriptor;
size_t method_index = 0;
- for (size_t j = 0; j < klass->NumDirectMethods(); j++, method_index++) {
- CheckMethod(klass->GetDirectMethod(j),
- oat_class.GetOatMethod(method_index), dex_file);
+ for (auto& m : klass->GetDirectMethods(pointer_size)) {
+ CheckMethod(&m, oat_class.GetOatMethod(method_index), dex_file);
+ ++method_index;
}
- for (size_t j = 0; j < num_virtual_methods; j++, method_index++) {
- CheckMethod(klass->GetVirtualMethod(j),
- oat_class.GetOatMethod(method_index), dex_file);
+ size_t visited_virtuals = 0;
+ for (auto& m : klass->GetVirtualMethods(pointer_size)) {
+ if (!m.IsMiranda()) {
+ CheckMethod(&m, oat_class.GetOatMethod(method_index), dex_file);
+ ++method_index;
+ ++visited_virtuals;
+ }
}
+ EXPECT_EQ(visited_virtuals, num_virtual_methods);
}
}
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 15b4017..633bf64 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -19,6 +19,7 @@
#include <zlib.h>
#include "arch/arm64/instruction_set_features_arm64.h"
+#include "art_method-inl.h"
#include "base/allocator.h"
#include "base/bit_vector.h"
#include "base/stl_util.h"
@@ -33,7 +34,6 @@
#include "gc/space/space.h"
#include "image_writer.h"
#include "linker/relative_patcher.h"
-#include "mirror/art_method-inl.h"
#include "mirror/array.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache-inl.h"
@@ -620,10 +620,9 @@ class OatWriter::InitImageMethodVisitor : public OatDexMethodVisitor {
ScopedObjectAccessUnchecked soa(Thread::Current());
StackHandleScope<1> hs(soa.Self());
Handle<mirror::DexCache> dex_cache(hs.NewHandle(linker->FindDexCache(*dex_file_)));
- mirror::ArtMethod* method = linker->ResolveMethod(*dex_file_, it.GetMemberIndex(), dex_cache,
- NullHandle<mirror::ClassLoader>(),
- NullHandle<mirror::ArtMethod>(),
- invoke_type);
+ ArtMethod* method = linker->ResolveMethod(
+ *dex_file_, it.GetMemberIndex(), dex_cache, NullHandle<mirror::ClassLoader>(), nullptr,
+ invoke_type);
if (method == nullptr) {
LOG(ERROR) << "Unexpected failure to resolve a method: "
<< PrettyMethod(it.GetMemberIndex(), *dex_file_, true);
@@ -755,8 +754,8 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
uint32_t target_offset = GetTargetOffset(patch);
PatchCodeAddress(&patched_code_, patch.LiteralOffset(), target_offset);
} else if (patch.Type() == kLinkerPatchMethod) {
- mirror::ArtMethod* method = GetTargetMethod(patch);
- PatchObjectAddress(&patched_code_, patch.LiteralOffset(), method);
+ ArtMethod* method = GetTargetMethod(patch);
+ PatchMethodAddress(&patched_code_, patch.LiteralOffset(), method);
} else if (patch.Type() == kLinkerPatchType) {
mirror::Class* type = GetTargetType(patch);
PatchObjectAddress(&patched_code_, patch.LiteralOffset(), type);
@@ -794,12 +793,13 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
<< PrettyMethod(it.GetMemberIndex(), *dex_file_) << " to " << out_->GetLocation();
}
- mirror::ArtMethod* GetTargetMethod(const LinkerPatch& patch)
+ ArtMethod* GetTargetMethod(const LinkerPatch& patch)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
MethodReference ref = patch.TargetMethod();
mirror::DexCache* dex_cache =
(dex_file_ == ref.dex_file) ? dex_cache_ : class_linker_->FindDexCache(*ref.dex_file);
- mirror::ArtMethod* method = dex_cache->GetResolvedMethod(ref.dex_method_index);
+ ArtMethod* method = dex_cache->GetResolvedMethod(
+ ref.dex_method_index, class_linker_->GetImagePointerSize());
CHECK(method != nullptr);
return method;
}
@@ -810,7 +810,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
(target_it != writer_->method_offset_map_.map.end()) ? target_it->second : 0u;
// If there's no compiled code, point to the correct trampoline.
if (UNLIKELY(target_offset == 0)) {
- mirror::ArtMethod* target = GetTargetMethod(patch);
+ ArtMethod* target = GetTargetMethod(patch);
DCHECK(target != nullptr);
size_t size = GetInstructionSetPointerSize(writer_->compiler_driver_->GetInstructionSet());
const void* oat_code_offset = target->GetEntryPointFromQuickCompiledCodePtrSize(size);
@@ -865,6 +865,23 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
data[3] = (address >> 24) & 0xffu;
}
+ void PatchMethodAddress(std::vector<uint8_t>* code, uint32_t offset, ArtMethod* method)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ // NOTE: Direct method pointers across oat files don't use linker patches. However, direct
+ // type pointers across oat files do. (TODO: Investigate why.)
+ if (writer_->image_writer_ != nullptr) {
+ method = writer_->image_writer_->GetImageMethodAddress(method);
+ }
+ // Note: We only patch ArtMethods to low 4GB since that's where the image is.
+ uint32_t address = PointerToLowMemUInt32(method);
+ DCHECK_LE(offset + 4, code->size());
+ uint8_t* data = &(*code)[offset];
+ data[0] = address & 0xffu;
+ data[1] = (address >> 8) & 0xffu;
+ data[2] = (address >> 16) & 0xffu;
+ data[3] = (address >> 24) & 0xffu;
+ }
+
void PatchCodeAddress(std::vector<uint8_t>* code, uint32_t offset, uint32_t target_offset)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
uint32_t address = writer_->image_writer_ == nullptr ? target_offset :
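PatchMethodAddress can truncate the ArtMethod* to 32 bits because the image, and hence every patched method, is mapped in the low 4 GiB. The byte-by-byte store it shares with PatchCodeAddress is worth a sketch on its own: it makes the write explicitly little-endian and alignment-safe on any host:

#include <cstdint>
#include <vector>

// Store a 32-bit address into patched code, explicitly little-endian
// and free of unaligned-access concerns on any host architecture.
void WriteUint32LE(std::vector<uint8_t>* code, uint32_t offset, uint32_t address) {
  uint8_t* data = &(*code)[offset];
  data[0] = address & 0xffu;
  data[1] = (address >> 8) & 0xffu;
  data[2] = (address >> 16) & 0xffu;
  data[3] = (address >> 24) & 0xffu;
}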
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index a5c6f23..58416ee 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -665,9 +665,8 @@ bool HGraphBuilder::BuildInvoke(const Instruction& instruction,
*dex_compilation_unit_->GetDexFile())));
Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
soa.Decode<mirror::ClassLoader*>(dex_compilation_unit_->GetClassLoader())));
- mirror::ArtMethod* resolved_method = compiler_driver_->ResolveMethod(
- soa, dex_cache, class_loader, dex_compilation_unit_, method_idx,
- optimized_invoke_type);
+ ArtMethod* resolved_method = compiler_driver_->ResolveMethod(
+ soa, dex_cache, class_loader, dex_compilation_unit_, method_idx, optimized_invoke_type);
if (resolved_method == nullptr) {
MaybeRecordStat(MethodCompilationStat::kNotCompiledUnresolvedMethod);
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 0e776b3..a5d5305 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -114,18 +114,24 @@ size_t CodeGenerator::GetCacheOffset(uint32_t index) {
return mirror::ObjectArray<mirror::Object>::OffsetOfElement(index).SizeValue();
}
+size_t CodeGenerator::GetCachePointerOffset(uint32_t index) {
+ auto pointer_size = InstructionSetPointerSize(GetInstructionSet());
+ return mirror::Array::DataOffset(pointer_size).Uint32Value() + pointer_size * index;
+}
+
void CodeGenerator::CompileBaseline(CodeAllocator* allocator, bool is_leaf) {
Initialize();
if (!is_leaf) {
MarkNotLeaf();
}
+ const bool is_64_bit = Is64BitInstructionSet(GetInstructionSet());
InitializeCodeGeneration(GetGraph()->GetNumberOfLocalVRegs()
+ GetGraph()->GetTemporariesVRegSlots()
+ 1 /* filler */,
0, /* the baseline compiler does not have live registers at slow path */
0, /* the baseline compiler does not have live registers at slow path */
GetGraph()->GetMaximumNumberOfOutVRegs()
- + 1 /* current method */,
+ + (is_64_bit ? 2 : 1) /* current method */,
GetGraph()->GetBlocks());
CompileInternal(allocator, /* is_baseline */ true);
}
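GetCachePointerOffset exists because the dex cache's resolved-methods array now holds raw pointers rather than 4-byte HeapReferences, so element offsets must scale by the target pointer size (GetCacheOffset remains for object arrays). The CompileBaseline change reserves two vreg slots for the current method on 64-bit targets for the same reason: an ArtMethod* no longer fits in one 32-bit slot. A sketch of the offset computation, with a hypothetical array-header size standing in for mirror::Array::DataOffset():

#include <cstddef>
#include <cstdint>

// Element offset into a dex-cache pointer array. kArrayHeaderSize stands
// in for mirror::Array::DataOffset(pointer_size); 16 is an assumption.
size_t CachePointerOffset(uint32_t index, size_t pointer_size) {
  const size_t kArrayHeaderSize = 16;  // hypothetical
  return kArrayHeaderSize + pointer_size * index;
}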
@@ -270,7 +276,8 @@ int32_t CodeGenerator::GetStackSlot(HLocal* local) const {
uint16_t number_of_locals = GetGraph()->GetNumberOfLocalVRegs();
if (reg_number >= number_of_locals) {
// Local is a parameter of the method. It is stored in the caller's frame.
- return GetFrameSize() + kVRegSize // ART method
+ // TODO: Share this logic with StackVisitor::GetVRegOffsetFromQuickCode.
+ return GetFrameSize() + InstructionSetPointerSize(GetInstructionSet()) // ART method
+ (reg_number - number_of_locals) * kVRegSize;
} else {
// Local is a temporary in this method. It is stored in this method's frame.
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index bdbd571..c6317f1 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -145,7 +145,7 @@ class CodeGenerator {
size_t GetStackSlotOfParameter(HParameterValue* parameter) const {
// Note that this follows the current calling convention.
return GetFrameSize()
- + kVRegSize // Art method
+ + InstructionSetPointerSize(GetInstructionSet()) // Art method
+ parameter->GetIndex() * kVRegSize;
}
@@ -266,6 +266,8 @@ class CodeGenerator {
// Note: this method assumes we always have the same pointer size, regardless
// of the architecture.
static size_t GetCacheOffset(uint32_t index);
+ // Pointer variant for ArtMethod and ArtField arrays.
+ size_t GetCachePointerOffset(uint32_t index);
void EmitParallelMoves(Location from1,
Location to1,
@@ -469,11 +471,13 @@ class CallingConvention {
CallingConvention(const C* registers,
size_t number_of_registers,
const F* fpu_registers,
- size_t number_of_fpu_registers)
+ size_t number_of_fpu_registers,
+ size_t pointer_size)
: registers_(registers),
number_of_registers_(number_of_registers),
fpu_registers_(fpu_registers),
- number_of_fpu_registers_(number_of_fpu_registers) {}
+ number_of_fpu_registers_(number_of_fpu_registers),
+ pointer_size_(pointer_size) {}
size_t GetNumberOfRegisters() const { return number_of_registers_; }
size_t GetNumberOfFpuRegisters() const { return number_of_fpu_registers_; }
@@ -490,8 +494,8 @@ class CallingConvention {
size_t GetStackOffsetOf(size_t index) const {
// We still reserve the space for parameters passed by registers.
- // Add one for the method pointer.
- return (index + 1) * kVRegSize;
+ // Add space for the method pointer.
+ return pointer_size_ + index * kVRegSize;
}
private:
@@ -499,6 +503,7 @@ class CallingConvention {
const size_t number_of_registers_;
const F* fpu_registers_;
const size_t number_of_fpu_registers_;
+ const size_t pointer_size_;
DISALLOW_COPY_AND_ASSIGN(CallingConvention);
};
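GetStackOffsetOf makes the matching adjustment on the caller side: stack-passed arguments still occupy 4-byte vreg slots, but they now sit above a method slot of full pointer width rather than above one vreg. A tiny sketch of the new offsets:

#include <cstddef>

constexpr size_t kVRegSize = 4;

// Offset of the index-th stack-passed argument: above a method slot of
// full pointer width, then one vreg slot per argument.
constexpr size_t StackOffsetOf(size_t index, size_t pointer_size) {
  return pointer_size + index * kVRegSize;
}

static_assert(StackOffsetOf(0, 4) == 4, "32-bit: same as old (index + 1) * kVRegSize");
static_assert(StackOffsetOf(0, 8) == 8, "64-bit: arguments shift up by 4 bytes");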
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 13775fe..2b1131d 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -17,13 +17,13 @@
#include "code_generator_arm.h"
#include "arch/arm/instruction_set_features_arm.h"
+#include "art_method.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "gc/accounting/card_table.h"
#include "intrinsics.h"
#include "intrinsics_arm.h"
#include "mirror/array-inl.h"
-#include "mirror/art_method.h"
-#include "mirror/class.h"
+#include "mirror/class-inl.h"
#include "thread.h"
#include "utils/arm/assembler_arm.h"
#include "utils/arm/managed_register_arm.h"
@@ -1312,8 +1312,8 @@ void InstructionCodeGeneratorARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
}
Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
- uint32_t method_offset = mirror::Class::EmbeddedVTableOffset().Uint32Value() +
- invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
+ uint32_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
+ invoke->GetVTableIndex(), kArmPointerSize).Uint32Value();
LocationSummary* locations = invoke->GetLocations();
Location receiver = locations->InAt(0);
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
@@ -1326,7 +1326,7 @@ void InstructionCodeGeneratorARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
}
codegen_->MaybeRecordImplicitNullCheck(invoke);
// temp = temp->GetMethodAt(method_offset);
- uint32_t entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ uint32_t entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(
kArmWordSize).Int32Value();
__ LoadFromOffset(kLoadWord, temp, temp, method_offset);
// LR = temp->GetEntryPoint();
@@ -1346,8 +1346,8 @@ void LocationsBuilderARM::VisitInvokeInterface(HInvokeInterface* invoke) {
void InstructionCodeGeneratorARM::VisitInvokeInterface(HInvokeInterface* invoke) {
// TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
- uint32_t method_offset = mirror::Class::EmbeddedImTableOffset().Uint32Value() +
- (invoke->GetImtIndex() % mirror::Class::kImtSize) * sizeof(mirror::Class::ImTableEntry);
+ uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
+ invoke->GetImtIndex() % mirror::Class::kImtSize, kArmPointerSize).Uint32Value();
LocationSummary* locations = invoke->GetLocations();
Location receiver = locations->InAt(0);
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
@@ -1365,7 +1365,7 @@ void InstructionCodeGeneratorARM::VisitInvokeInterface(HInvokeInterface* invoke)
}
codegen_->MaybeRecordImplicitNullCheck(invoke);
// temp = temp->GetImtEntryAt(method_offset);
- uint32_t entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ uint32_t entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(
kArmWordSize).Int32Value();
__ LoadFromOffset(kLoadWord, temp, temp, method_offset);
// LR = temp->GetEntryPoint();
@@ -3796,12 +3796,12 @@ void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) {
DCHECK(!cls->CanCallRuntime());
DCHECK(!cls->MustGenerateClinitCheck());
codegen_->LoadCurrentMethod(out);
- __ LoadFromOffset(kLoadWord, out, out, mirror::ArtMethod::DeclaringClassOffset().Int32Value());
+ __ LoadFromOffset(kLoadWord, out, out, ArtMethod::DeclaringClassOffset().Int32Value());
} else {
DCHECK(cls->CanCallRuntime());
codegen_->LoadCurrentMethod(out);
__ LoadFromOffset(
- kLoadWord, out, out, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value());
+ kLoadWord, out, out, ArtMethod::DexCacheResolvedTypesOffset().Int32Value());
__ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex()));
SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM(
@@ -3858,7 +3858,7 @@ void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) {
Register out = load->GetLocations()->Out().AsRegister<Register>();
codegen_->LoadCurrentMethod(out);
- __ LoadFromOffset(kLoadWord, out, out, mirror::ArtMethod::DeclaringClassOffset().Int32Value());
+ __ LoadFromOffset(kLoadWord, out, out, ArtMethod::DeclaringClassOffset().Int32Value());
__ LoadFromOffset(kLoadWord, out, out, mirror::Class::DexCacheStringsOffset().Int32Value());
__ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(load->GetStringIndex()));
__ cmp(out, ShifterOperand(0));
@@ -4081,7 +4081,7 @@ void CodeGeneratorARM::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
__ LoadFromOffset(kLoadWord, temp, TR, invoke->GetStringInitOffset());
// LR = temp[offset_of_quick_compiled_code]
__ LoadFromOffset(kLoadWord, LR, temp,
- mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ ArtMethod::EntryPointFromQuickCompiledCodeOffset(
kArmWordSize).Int32Value());
// LR()
__ blx(LR);
@@ -4091,14 +4091,13 @@ void CodeGeneratorARM::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
if (!invoke->IsRecursive()) {
// temp = temp->dex_cache_resolved_methods_;
__ LoadFromOffset(
- kLoadWord, temp, temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value());
+ kLoadWord, temp, temp, ArtMethod::DexCacheResolvedMethodsOffset().Int32Value());
// temp = temp[index_in_cache]
__ LoadFromOffset(
kLoadWord, temp, temp, CodeGenerator::GetCacheOffset(invoke->GetDexMethodIndex()));
// LR = temp[offset_of_quick_compiled_code]
- __ LoadFromOffset(kLoadWord, LR, temp,
- mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
- kArmWordSize).Int32Value());
+ __ LoadFromOffset(kLoadWord, LR, temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kArmWordSize).Int32Value());
// LR()
__ blx(LR);
} else {
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 1a498e1..c410fa8 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -54,7 +54,8 @@ class InvokeRuntimeCallingConvention : public CallingConvention<Register, SRegis
: CallingConvention(kRuntimeParameterCoreRegisters,
kRuntimeParameterCoreRegistersLength,
kRuntimeParameterFpuRegisters,
- kRuntimeParameterFpuRegistersLength) {}
+ kRuntimeParameterFpuRegistersLength,
+ kArmPointerSize) {}
private:
DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
@@ -72,7 +73,8 @@ class InvokeDexCallingConvention : public CallingConvention<Register, SRegister>
: CallingConvention(kParameterCoreRegisters,
kParameterCoreRegistersLength,
kParameterFpuRegisters,
- kParameterFpuRegistersLength) {}
+ kParameterFpuRegistersLength,
+ kArmPointerSize) {}
private:
DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConvention);
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 0222f93..55ef66f 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -17,6 +17,7 @@
#include "code_generator_arm64.h"
#include "arch/arm64/instruction_set_features_arm64.h"
+#include "art_method.h"
#include "common_arm64.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
@@ -24,8 +25,7 @@
#include "intrinsics.h"
#include "intrinsics_arm64.h"
#include "mirror/array-inl.h"
-#include "mirror/art_method.h"
-#include "mirror/class.h"
+#include "mirror/class-inl.h"
#include "offsets.h"
#include "thread.h"
#include "utils/arm64/assembler_arm64.h"
@@ -65,7 +65,6 @@ using helpers::WRegisterFrom;
using helpers::XRegisterFrom;
using helpers::ARM64EncodableConstantOrRegister;
-static constexpr size_t kHeapRefSize = sizeof(mirror::HeapReference<mirror::Object>);
static constexpr int kCurrentMethodStackOffset = 0;
inline Condition ARM64Condition(IfCondition cond) {
@@ -968,7 +967,7 @@ void CodeGeneratorARM64::StoreRelease(Primitive::Type type,
void CodeGeneratorARM64::LoadCurrentMethod(vixl::Register current_method) {
DCHECK(RequiresCurrentMethod());
- DCHECK(current_method.IsW());
+ CHECK(current_method.IsX());
__ Ldr(current_method, MemOperand(sp, kCurrentMethodStackOffset));
}
@@ -1940,12 +1939,12 @@ void LocationsBuilderARM64::VisitInvokeInterface(HInvokeInterface* invoke) {
void InstructionCodeGeneratorARM64::VisitInvokeInterface(HInvokeInterface* invoke) {
// TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
- Register temp = WRegisterFrom(invoke->GetLocations()->GetTemp(0));
- uint32_t method_offset = mirror::Class::EmbeddedImTableOffset().Uint32Value() +
- (invoke->GetImtIndex() % mirror::Class::kImtSize) * sizeof(mirror::Class::ImTableEntry);
+ Register temp = XRegisterFrom(invoke->GetLocations()->GetTemp(0));
+ uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
+ invoke->GetImtIndex() % mirror::Class::kImtSize, kArm64PointerSize).Uint32Value();
Location receiver = invoke->GetLocations()->InAt(0);
Offset class_offset = mirror::Object::ClassOffset();
- Offset entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);
+ Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);
// The register ip1 is required to be used for the hidden argument in
// art_quick_imt_conflict_trampoline, so prevent VIXL from using it.
@@ -1957,16 +1956,16 @@ void InstructionCodeGeneratorARM64::VisitInvokeInterface(HInvokeInterface* invok
// temp = object->GetClass();
if (receiver.IsStackSlot()) {
- __ Ldr(temp, StackOperandFrom(receiver));
- __ Ldr(temp, HeapOperand(temp, class_offset));
+ __ Ldr(temp.W(), StackOperandFrom(receiver));
+ __ Ldr(temp.W(), HeapOperand(temp.W(), class_offset));
} else {
- __ Ldr(temp, HeapOperandFrom(receiver, class_offset));
+ __ Ldr(temp.W(), HeapOperandFrom(receiver, class_offset));
}
codegen_->MaybeRecordImplicitNullCheck(invoke);
// temp = temp->GetImtEntryAt(method_offset);
- __ Ldr(temp, HeapOperand(temp, method_offset));
+ __ Ldr(temp, MemOperand(temp, method_offset));
// lr = temp->GetEntryPoint();
- __ Ldr(lr, HeapOperand(temp, entry_point));
+ __ Ldr(lr, MemOperand(temp, entry_point.Int32Value()));
// lr();
__ Blr(lr);
DCHECK(!codegen_->IsLeafMethod());
@@ -2007,8 +2006,7 @@ static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorARM64* codege
void CodeGeneratorARM64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Register temp) {
// Make sure that ArtMethod* is passed in kArtMethodRegister as per the calling convention.
DCHECK(temp.Is(kArtMethodRegister));
- size_t index_in_cache = mirror::Array::DataOffset(kHeapRefSize).SizeValue() +
- invoke->GetDexMethodIndex() * kHeapRefSize;
+ size_t index_in_cache = GetCachePointerOffset(invoke->GetDexMethodIndex());
// TODO: Implement all kinds of calls:
// 1) boot -> boot
@@ -2019,23 +2017,24 @@ void CodeGeneratorARM64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invok
if (invoke->IsStringInit()) {
// temp = thread->string_init_entrypoint
- __ Ldr(temp, HeapOperand(tr, invoke->GetStringInitOffset()));
+ __ Ldr(temp.X(), MemOperand(tr, invoke->GetStringInitOffset()));
// LR = temp->entry_point_from_quick_compiled_code_;
- __ Ldr(lr, HeapOperand(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
- kArm64WordSize)));
+ __ Ldr(lr, MemOperand(
+ temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize).Int32Value()));
// lr()
__ Blr(lr);
} else {
// temp = method;
- LoadCurrentMethod(temp);
+ LoadCurrentMethod(temp.X());
if (!invoke->IsRecursive()) {
// temp = temp->dex_cache_resolved_methods_;
- __ Ldr(temp, HeapOperand(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset()));
+ __ Ldr(temp.W(), MemOperand(temp.X(),
+ ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
// temp = temp[index_in_cache];
- __ Ldr(temp, HeapOperand(temp, index_in_cache));
+ __ Ldr(temp.X(), MemOperand(temp, index_in_cache));
// lr = temp->entry_point_from_quick_compiled_code_;
- __ Ldr(lr, HeapOperand(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
- kArm64WordSize)));
+ __ Ldr(lr, MemOperand(temp.X(), ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kArm64WordSize).Int32Value()));
// lr();
__ Blr(lr);
} else {
@@ -2056,7 +2055,7 @@ void InstructionCodeGeneratorARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDir
}
BlockPoolsScope block_pools(GetVIXLAssembler());
- Register temp = WRegisterFrom(invoke->GetLocations()->GetTemp(0));
+ Register temp = XRegisterFrom(invoke->GetLocations()->GetTemp(0));
codegen_->GenerateStaticOrDirectCall(invoke, temp);
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
@@ -2068,27 +2067,27 @@ void InstructionCodeGeneratorARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
LocationSummary* locations = invoke->GetLocations();
Location receiver = locations->InAt(0);
- Register temp = WRegisterFrom(invoke->GetLocations()->GetTemp(0));
- size_t method_offset = mirror::Class::EmbeddedVTableOffset().SizeValue() +
- invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
+ Register temp = XRegisterFrom(invoke->GetLocations()->GetTemp(0));
+ size_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
+ invoke->GetVTableIndex(), kArm64PointerSize).SizeValue();
Offset class_offset = mirror::Object::ClassOffset();
- Offset entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);
+ Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);
BlockPoolsScope block_pools(GetVIXLAssembler());
// temp = object->GetClass();
if (receiver.IsStackSlot()) {
- __ Ldr(temp, MemOperand(sp, receiver.GetStackIndex()));
- __ Ldr(temp, HeapOperand(temp, class_offset));
+ __ Ldr(temp.W(), MemOperand(sp, receiver.GetStackIndex()));
+ __ Ldr(temp.W(), HeapOperand(temp.W(), class_offset));
} else {
DCHECK(receiver.IsRegister());
- __ Ldr(temp, HeapOperandFrom(receiver, class_offset));
+ __ Ldr(temp.W(), HeapOperandFrom(receiver, class_offset));
}
codegen_->MaybeRecordImplicitNullCheck(invoke);
// temp = temp->GetMethodAt(method_offset);
- __ Ldr(temp, HeapOperand(temp, method_offset));
+ __ Ldr(temp, MemOperand(temp, method_offset));
// lr = temp->GetEntryPoint();
- __ Ldr(lr, HeapOperand(temp, entry_point.SizeValue()));
+ __ Ldr(lr, MemOperand(temp, entry_point.SizeValue()));
// lr();
__ Blr(lr);
DCHECK(!codegen_->IsLeafMethod());
@@ -2107,12 +2106,12 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) {
if (cls->IsReferrersClass()) {
DCHECK(!cls->CanCallRuntime());
DCHECK(!cls->MustGenerateClinitCheck());
- codegen_->LoadCurrentMethod(out);
- __ Ldr(out, HeapOperand(out, mirror::ArtMethod::DeclaringClassOffset()));
+ codegen_->LoadCurrentMethod(out.X());
+ __ Ldr(out, MemOperand(out.X(), ArtMethod::DeclaringClassOffset().Int32Value()));
} else {
DCHECK(cls->CanCallRuntime());
- codegen_->LoadCurrentMethod(out);
- __ Ldr(out, HeapOperand(out, mirror::ArtMethod::DexCacheResolvedTypesOffset()));
+ codegen_->LoadCurrentMethod(out.X());
+ __ Ldr(out, MemOperand(out.X(), ArtMethod::DexCacheResolvedTypesOffset().Int32Value()));
__ Ldr(out, HeapOperand(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex())));
SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM64(
@@ -2159,8 +2158,8 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) {
codegen_->AddSlowPath(slow_path);
Register out = OutputRegister(load);
- codegen_->LoadCurrentMethod(out);
- __ Ldr(out, HeapOperand(out, mirror::ArtMethod::DeclaringClassOffset()));
+ codegen_->LoadCurrentMethod(out.X());
+ __ Ldr(out, MemOperand(out.X(), ArtMethod::DeclaringClassOffset().Int32Value()));
__ Ldr(out, HeapOperand(out, mirror::Class::DexCacheStringsOffset()));
__ Ldr(out, HeapOperand(out, CodeGenerator::GetCacheOffset(load->GetStringIndex())));
__ Cbz(out, slow_path->GetEntryLabel());
@@ -2288,7 +2287,7 @@ void LocationsBuilderARM64::VisitNewArray(HNewArray* instruction) {
locations->SetOut(LocationFrom(x0));
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(1)));
CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck,
- void*, uint32_t, int32_t, mirror::ArtMethod*>();
+ void*, uint32_t, int32_t, ArtMethod*>();
}
void InstructionCodeGeneratorARM64::VisitNewArray(HNewArray* instruction) {
@@ -2296,17 +2295,16 @@ void InstructionCodeGeneratorARM64::VisitNewArray(HNewArray* instruction) {
InvokeRuntimeCallingConvention calling_convention;
Register type_index = RegisterFrom(locations->GetTemp(0), Primitive::kPrimInt);
DCHECK(type_index.Is(w0));
- Register current_method = RegisterFrom(locations->GetTemp(1), Primitive::kPrimNot);
- DCHECK(current_method.Is(w2));
- codegen_->LoadCurrentMethod(current_method);
+ Register current_method = RegisterFrom(locations->GetTemp(1), Primitive::kPrimLong);
+ DCHECK(current_method.Is(x2));
+ codegen_->LoadCurrentMethod(current_method.X());
__ Mov(type_index, instruction->GetTypeIndex());
codegen_->InvokeRuntime(
GetThreadOffset<kArm64WordSize>(instruction->GetEntrypoint()).Int32Value(),
instruction,
instruction->GetDexPc(),
nullptr);
- CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck,
- void*, uint32_t, int32_t, mirror::ArtMethod*>();
+ CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck, void*, uint32_t, int32_t, ArtMethod*>();
}
void LocationsBuilderARM64::VisitNewInstance(HNewInstance* instruction) {
@@ -2316,7 +2314,7 @@ void LocationsBuilderARM64::VisitNewInstance(HNewInstance* instruction) {
locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(0)));
locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(1)));
locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
- CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, mirror::ArtMethod*>();
+ CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
}
void InstructionCodeGeneratorARM64::VisitNewInstance(HNewInstance* instruction) {
@@ -2325,14 +2323,14 @@ void InstructionCodeGeneratorARM64::VisitNewInstance(HNewInstance* instruction)
DCHECK(type_index.Is(w0));
Register current_method = RegisterFrom(locations->GetTemp(1), Primitive::kPrimNot);
DCHECK(current_method.Is(w1));
- codegen_->LoadCurrentMethod(current_method);
+ codegen_->LoadCurrentMethod(current_method.X());
__ Mov(type_index, instruction->GetTypeIndex());
codegen_->InvokeRuntime(
GetThreadOffset<kArm64WordSize>(instruction->GetEntrypoint()).Int32Value(),
instruction,
instruction->GetDexPc(),
nullptr);
- CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, mirror::ArtMethod*>();
+ CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
}
void LocationsBuilderARM64::VisitNot(HNot* instruction) {
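The arm64 register-width churn above follows one rule: compressed heap references (such as an object's class pointer) stay 32-bit and are loaded into W registers through HeapOperand, while ArtMethod* values are native pointers loaded into X registers through plain MemOperand. A host-side model of the two load widths, using memcpy in place of the VIXL macros:

#include <cstddef>
#include <cstdint>
#include <cstring>

// 32-bit load, the equivalent of Ldr(temp.W(), HeapOperand(...)).
uint32_t LoadHeapReference(const uint8_t* obj, size_t offset) {
  uint32_t ref;
  std::memcpy(&ref, obj + offset, sizeof(ref));
  return ref;
}

// 64-bit load, the equivalent of Ldr(temp.X(), MemOperand(...)).
uint64_t LoadNativePointer(const uint8_t* obj, size_t offset) {
  uint64_t ptr;
  std::memcpy(&ptr, obj + offset, sizeof(ptr));
  return ptr;
}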
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 8aeea54..3486cde 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -45,7 +45,7 @@ static const vixl::FPRegister kParameterFPRegisters[] = {
static constexpr size_t kParameterFPRegistersLength = arraysize(kParameterFPRegisters);
const vixl::Register tr = vixl::x18; // Thread Register
-static const vixl::Register kArtMethodRegister = vixl::w0; // Method register on invoke.
+static const vixl::Register kArtMethodRegister = vixl::x0; // Method register on invoke.
const vixl::CPURegList vixl_reserved_core_registers(vixl::ip0, vixl::ip1);
const vixl::CPURegList vixl_reserved_fp_registers(vixl::d31);
@@ -94,7 +94,8 @@ class InvokeRuntimeCallingConvention : public CallingConvention<vixl::Register,
: CallingConvention(kRuntimeParameterCoreRegisters,
kRuntimeParameterCoreRegistersLength,
kRuntimeParameterFpuRegisters,
- kRuntimeParameterFpuRegistersLength) {}
+ kRuntimeParameterFpuRegistersLength,
+ kArm64PointerSize) {}
Location GetReturnLocation(Primitive::Type return_type);
@@ -108,7 +109,8 @@ class InvokeDexCallingConvention : public CallingConvention<vixl::Register, vixl
: CallingConvention(kParameterCoreRegisters,
kParameterCoreRegistersLength,
kParameterFPRegisters,
- kParameterFPRegistersLength) {}
+ kParameterFPRegistersLength,
+ kArm64PointerSize) {}
Location GetReturnLocation(Primitive::Type return_type) {
return ARM64ReturnLocation(return_type);
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 2848a48..60fd29b 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -16,6 +16,7 @@
#include "code_generator_x86.h"
+#include "art_method.h"
#include "code_generator_utils.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
@@ -23,8 +24,7 @@
#include "intrinsics.h"
#include "intrinsics_x86.h"
#include "mirror/array-inl.h"
-#include "mirror/art_method.h"
-#include "mirror/class.h"
+#include "mirror/class-inl.h"
#include "thread.h"
#include "utils/assembler.h"
#include "utils/stack_checks.h"
@@ -1275,8 +1275,8 @@ void LocationsBuilderX86::HandleInvoke(HInvoke* invoke) {
void InstructionCodeGeneratorX86::VisitInvokeVirtual(HInvokeVirtual* invoke) {
Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
- uint32_t method_offset = mirror::Class::EmbeddedVTableOffset().Uint32Value() +
- invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
+ uint32_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
+ invoke->GetVTableIndex(), kX86PointerSize).Uint32Value();
LocationSummary* locations = invoke->GetLocations();
Location receiver = locations->InAt(0);
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
@@ -1292,7 +1292,7 @@ void InstructionCodeGeneratorX86::VisitInvokeVirtual(HInvokeVirtual* invoke) {
__ movl(temp, Address(temp, method_offset));
// call temp->GetEntryPoint();
__ call(Address(
- temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value()));
+ temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value()));
DCHECK(!codegen_->IsLeafMethod());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -1307,8 +1307,8 @@ void LocationsBuilderX86::VisitInvokeInterface(HInvokeInterface* invoke) {
void InstructionCodeGeneratorX86::VisitInvokeInterface(HInvokeInterface* invoke) {
// TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
- uint32_t method_offset = mirror::Class::EmbeddedImTableOffset().Uint32Value() +
- (invoke->GetImtIndex() % mirror::Class::kImtSize) * sizeof(mirror::Class::ImTableEntry);
+ uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
+ invoke->GetImtIndex() % mirror::Class::kImtSize, kX86PointerSize).Uint32Value();
LocationSummary* locations = invoke->GetLocations();
Location receiver = locations->InAt(0);
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
@@ -1328,7 +1328,7 @@ void InstructionCodeGeneratorX86::VisitInvokeInterface(HInvokeInterface* invoke)
// temp = temp->GetImtEntryAt(method_offset);
__ movl(temp, Address(temp, method_offset));
// call temp->GetEntryPoint();
- __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ __ call(Address(temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(
kX86WordSize).Int32Value()));
DCHECK(!codegen_->IsLeafMethod());
@@ -3207,18 +3207,19 @@ void CodeGeneratorX86::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
__ fs()->movl(temp, Address::Absolute(invoke->GetStringInitOffset()));
// (temp + offset_of_quick_compiled_code)()
__ call(Address(
- temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value()));
+ temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value()));
} else {
// temp = method;
LoadCurrentMethod(temp);
if (!invoke->IsRecursive()) {
// temp = temp->dex_cache_resolved_methods_;
- __ movl(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
+ __ movl(temp, Address(temp, ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
// temp = temp[index_in_cache]
- __ movl(temp, Address(temp, CodeGenerator::GetCacheOffset(invoke->GetDexMethodIndex())));
+ __ movl(temp, Address(temp,
+ CodeGenerator::GetCachePointerOffset(invoke->GetDexMethodIndex())));
// (temp + offset_of_quick_compiled_code)()
__ call(Address(temp,
- mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value()));
+ ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value()));
} else {
__ call(GetFrameEntryLabel());
}
@@ -4278,11 +4279,11 @@ void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) {
DCHECK(!cls->CanCallRuntime());
DCHECK(!cls->MustGenerateClinitCheck());
codegen_->LoadCurrentMethod(out);
- __ movl(out, Address(out, mirror::ArtMethod::DeclaringClassOffset().Int32Value()));
+ __ movl(out, Address(out, ArtMethod::DeclaringClassOffset().Int32Value()));
} else {
DCHECK(cls->CanCallRuntime());
codegen_->LoadCurrentMethod(out);
- __ movl(out, Address(out, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value()));
+ __ movl(out, Address(out, ArtMethod::DexCacheResolvedTypesOffset().Int32Value()));
__ movl(out, Address(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex())));
SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86(
@@ -4337,7 +4338,7 @@ void InstructionCodeGeneratorX86::VisitLoadString(HLoadString* load) {
Register out = load->GetLocations()->Out().AsRegister<Register>();
codegen_->LoadCurrentMethod(out);
- __ movl(out, Address(out, mirror::ArtMethod::DeclaringClassOffset().Int32Value()));
+ __ movl(out, Address(out, ArtMethod::DeclaringClassOffset().Int32Value()));
__ movl(out, Address(out, mirror::Class::DexCacheStringsOffset().Int32Value()));
__ movl(out, Address(out, CodeGenerator::GetCacheOffset(load->GetStringIndex())));
__ testl(out, out);
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 5a5a37b..43214fe 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -52,7 +52,8 @@ class InvokeRuntimeCallingConvention : public CallingConvention<Register, XmmReg
: CallingConvention(kRuntimeParameterCoreRegisters,
kRuntimeParameterCoreRegistersLength,
kRuntimeParameterFpuRegisters,
- kRuntimeParameterFpuRegistersLength) {}
+ kRuntimeParameterFpuRegistersLength,
+ kX86PointerSize) {}
private:
DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
@@ -64,7 +65,8 @@ class InvokeDexCallingConvention : public CallingConvention<Register, XmmRegiste
kParameterCoreRegisters,
kParameterCoreRegistersLength,
kParameterFpuRegisters,
- kParameterFpuRegistersLength) {}
+ kParameterFpuRegistersLength,
+ kX86PointerSize) {}
RegisterPair GetRegisterPairAt(size_t argument_index) {
DCHECK_LT(argument_index + 1, GetNumberOfRegisters());
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index e633970..b0174b9 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -16,14 +16,14 @@
#include "code_generator_x86_64.h"
+#include "art_method.h"
#include "code_generator_utils.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "gc/accounting/card_table.h"
#include "intrinsics.h"
#include "intrinsics_x86_64.h"
#include "mirror/array-inl.h"
-#include "mirror/art_method.h"
-#include "mirror/class.h"
+#include "mirror/class-inl.h"
#include "mirror/object_reference.h"
#include "thread.h"
#include "utils/assembler.h"
@@ -374,18 +374,19 @@ void CodeGeneratorX86_64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invo
// temp = thread->string_init_entrypoint
__ gs()->movl(temp, Address::Absolute(invoke->GetStringInitOffset()));
// (temp + offset_of_quick_compiled_code)()
- __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ __ call(Address(temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(
kX86_64WordSize).SizeValue()));
} else {
// temp = method;
LoadCurrentMethod(temp);
if (!invoke->IsRecursive()) {
// temp = temp->dex_cache_resolved_methods_;
- __ movl(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().SizeValue()));
+ __ movl(temp, Address(temp, ArtMethod::DexCacheResolvedMethodsOffset().SizeValue()));
// temp = temp[index_in_cache]
- __ movl(temp, Address(temp, CodeGenerator::GetCacheOffset(invoke->GetDexMethodIndex())));
+ __ movq(temp, Address(
+ temp, CodeGenerator::GetCachePointerOffset(invoke->GetDexMethodIndex())));
// (temp + offset_of_quick_compiled_code)()
- __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ __ call(Address(temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(
kX86_64WordSize).SizeValue()));
} else {
__ call(&frame_entry_label_);
@@ -545,7 +546,7 @@ void CodeGeneratorX86_64::GenerateFrameEntry() {
}
}
- __ movl(Address(CpuRegister(RSP), kCurrentMethodStackOffset), CpuRegister(RDI));
+ __ movq(Address(CpuRegister(RSP), kCurrentMethodStackOffset), CpuRegister(RDI));
}
void CodeGeneratorX86_64::GenerateFrameExit() {
@@ -585,7 +586,7 @@ void CodeGeneratorX86_64::Bind(HBasicBlock* block) {
void CodeGeneratorX86_64::LoadCurrentMethod(CpuRegister reg) {
DCHECK(RequiresCurrentMethod());
- __ movl(reg, Address(CpuRegister(RSP), kCurrentMethodStackOffset));
+ __ movq(reg, Address(CpuRegister(RSP), kCurrentMethodStackOffset));
}
Location CodeGeneratorX86_64::GetStackLocation(HLoadLocal* load) const {
@@ -1383,8 +1384,8 @@ void InstructionCodeGeneratorX86_64::VisitInvokeVirtual(HInvokeVirtual* invoke)
}
CpuRegister temp = invoke->GetLocations()->GetTemp(0).AsRegister<CpuRegister>();
- size_t method_offset = mirror::Class::EmbeddedVTableOffset().SizeValue() +
- invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
+ size_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
+ invoke->GetVTableIndex(), kX86_64PointerSize).SizeValue();
LocationSummary* locations = invoke->GetLocations();
Location receiver = locations->InAt(0);
size_t class_offset = mirror::Object::ClassOffset().SizeValue();
@@ -1397,9 +1398,9 @@ void InstructionCodeGeneratorX86_64::VisitInvokeVirtual(HInvokeVirtual* invoke)
}
codegen_->MaybeRecordImplicitNullCheck(invoke);
// temp = temp->GetMethodAt(method_offset);
- __ movl(temp, Address(temp, method_offset));
+ __ movq(temp, Address(temp, method_offset));
// call temp->GetEntryPoint();
- __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ __ call(Address(temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(
kX86_64WordSize).SizeValue()));
DCHECK(!codegen_->IsLeafMethod());
@@ -1415,8 +1416,8 @@ void LocationsBuilderX86_64::VisitInvokeInterface(HInvokeInterface* invoke) {
void InstructionCodeGeneratorX86_64::VisitInvokeInterface(HInvokeInterface* invoke) {
// TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
CpuRegister temp = invoke->GetLocations()->GetTemp(0).AsRegister<CpuRegister>();
- uint32_t method_offset = mirror::Class::EmbeddedImTableOffset().Uint32Value() +
- (invoke->GetImtIndex() % mirror::Class::kImtSize) * sizeof(mirror::Class::ImTableEntry);
+ uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
+ invoke->GetImtIndex() % mirror::Class::kImtSize, kX86_64PointerSize).Uint32Value();
LocationSummary* locations = invoke->GetLocations();
Location receiver = locations->InAt(0);
size_t class_offset = mirror::Object::ClassOffset().SizeValue();
@@ -1434,9 +1435,9 @@ void InstructionCodeGeneratorX86_64::VisitInvokeInterface(HInvokeInterface* invo
}
codegen_->MaybeRecordImplicitNullCheck(invoke);
// temp = temp->GetImtEntryAt(method_offset);
- __ movl(temp, Address(temp, method_offset));
+ __ movq(temp, Address(temp, method_offset));
// call temp->GetEntryPoint();
- __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ __ call(Address(temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(
kX86_64WordSize).SizeValue()));
DCHECK(!codegen_->IsLeafMethod());
@@ -4125,11 +4126,11 @@ void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) {
DCHECK(!cls->CanCallRuntime());
DCHECK(!cls->MustGenerateClinitCheck());
codegen_->LoadCurrentMethod(out);
- __ movl(out, Address(out, mirror::ArtMethod::DeclaringClassOffset().Int32Value()));
+ __ movl(out, Address(out, ArtMethod::DeclaringClassOffset().Int32Value()));
} else {
DCHECK(cls->CanCallRuntime());
codegen_->LoadCurrentMethod(out);
- __ movl(out, Address(out, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value()));
+ __ movl(out, Address(out, ArtMethod::DexCacheResolvedTypesOffset().Int32Value()));
__ movl(out, Address(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex())));
SlowPathCodeX86_64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86_64(
cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
@@ -4174,7 +4175,7 @@ void InstructionCodeGeneratorX86_64::VisitLoadString(HLoadString* load) {
CpuRegister out = load->GetLocations()->Out().AsRegister<CpuRegister>();
codegen_->LoadCurrentMethod(CpuRegister(out));
- __ movl(out, Address(out, mirror::ArtMethod::DeclaringClassOffset().Int32Value()));
+ __ movl(out, Address(out, ArtMethod::DeclaringClassOffset().Int32Value()));
__ movl(out, Address(out, mirror::Class::DexCacheStringsOffset().Int32Value()));
__ movl(out, Address(out, CodeGenerator::GetCacheOffset(load->GetStringIndex())));
__ testl(out, out);
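Across the x86-64 hunks above, loads and stores of the current method widen from movl to movq, and vtable/IMT lookups switch to pointer-scaled entry offsets: an ArtMethod* is now a full native pointer rather than a 32-bit StackReference, and the tables embedded in mirror::Class hold pointer-sized slots. A self-contained analog of the new offset math (names here are illustrative, not ART's):

#include <cstddef>

// Assumed layout: embedded vtable entries form a flat array of ArtMethod*
// starting at some fixed base offset inside the Class object.
constexpr size_t EmbeddedVTableEntryOffsetSketch(size_t vtable_index,
                                                 size_t pointer_size,
                                                 size_t base_offset) {
  return base_offset + vtable_index * pointer_size;
}

// With 8-byte pointers each slot is 8 bytes apart, hence the movq loads.
static_assert(EmbeddedVTableEntryOffsetSketch(3, 8, 64) == 88, "pointer-sized slots");
static_assert(EmbeddedVTableEntryOffsetSketch(3, 4, 64) == 76, "32-bit targets");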
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 480ea6b..4be401a 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -50,7 +50,8 @@ class InvokeRuntimeCallingConvention : public CallingConvention<Register, FloatR
: CallingConvention(kRuntimeParameterCoreRegisters,
kRuntimeParameterCoreRegistersLength,
kRuntimeParameterFpuRegisters,
- kRuntimeParameterFpuRegistersLength) {}
+ kRuntimeParameterFpuRegistersLength,
+ kX86_64PointerSize) {}
private:
DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
@@ -62,7 +63,8 @@ class InvokeDexCallingConvention : public CallingConvention<Register, FloatRegis
kParameterCoreRegisters,
kParameterCoreRegistersLength,
kParameterFloatRegisters,
- kParameterFloatRegistersLength) {}
+ kParameterFloatRegistersLength,
+ kX86_64PointerSize) {}
private:
DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConvention);
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index d88424c..8253a43 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -16,6 +16,7 @@
#include "inliner.h"
+#include "art_method-inl.h"
#include "builder.h"
#include "class_linker.h"
#include "constant_folding.h"
@@ -23,7 +24,6 @@
#include "driver/compiler_driver-inl.h"
#include "driver/dex_compilation_unit.h"
#include "instruction_simplifier.h"
-#include "mirror/art_method-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "nodes.h"
@@ -81,11 +81,10 @@ bool HInliner::TryInline(HInvoke* invoke_instruction,
hs.NewHandle(caller_compilation_unit_.GetClassLinker()->FindDexCache(caller_dex_file)));
Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
soa.Decode<mirror::ClassLoader*>(caller_compilation_unit_.GetClassLoader())));
- Handle<mirror::ArtMethod> resolved_method(hs.NewHandle(
- compiler_driver_->ResolveMethod(
- soa, dex_cache, class_loader, &caller_compilation_unit_, method_index, invoke_type)));
+ ArtMethod* resolved_method(compiler_driver_->ResolveMethod(
+ soa, dex_cache, class_loader, &caller_compilation_unit_, method_index, invoke_type));
- if (resolved_method.Get() == nullptr) {
+ if (resolved_method == nullptr) {
VLOG(compiler) << "Method cannot be resolved " << PrettyMethod(method_index, caller_dex_file);
return false;
}
@@ -149,7 +148,7 @@ bool HInliner::TryInline(HInvoke* invoke_instruction,
return true;
}
-bool HInliner::TryBuildAndInline(Handle<mirror::ArtMethod> resolved_method,
+bool HInliner::TryBuildAndInline(ArtMethod* resolved_method,
HInvoke* invoke_instruction,
uint32_t method_index,
bool can_use_dex_cache) const {
@@ -172,6 +171,7 @@ bool HInliner::TryBuildAndInline(Handle<mirror::ArtMethod> resolved_method,
graph_->GetArena(),
caller_dex_file,
method_index,
+ compiler_driver_->GetInstructionSet(),
graph_->IsDebuggable(),
graph_->GetCurrentInstructionId());
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index 1dbc7d3..831bdf2 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -48,7 +48,7 @@ class HInliner : public HOptimization {
private:
bool TryInline(HInvoke* invoke_instruction, uint32_t method_index, InvokeType invoke_type) const;
- bool TryBuildAndInline(Handle<mirror::ArtMethod> resolved_method,
+ bool TryBuildAndInline(ArtMethod* resolved_method,
HInvoke* invoke_instruction,
uint32_t method_index,
bool can_use_dex_cache) const;
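The inliner swaps Handle<mirror::ArtMethod> for a plain ArtMethod*: once methods live in native linear-alloc memory instead of on the managed heap, the collector can neither move nor reclaim them during compilation, so no handle scope is needed. The shape of the change, paraphrased as a sketch:

// Before: a managed heap object, so GC-safe wrapping was mandatory.
//   Handle<mirror::ArtMethod> resolved(hs.NewHandle(ResolveMethod(...)));
//   if (resolved.Get() == nullptr) return false;
//
// After: native memory, so a raw pointer stays valid across GC points.
//   ArtMethod* resolved = ResolveMethod(...);
//   if (resolved == nullptr) return false;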
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index dccfe9a..db35b8f 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -17,11 +17,11 @@
#include "intrinsics_arm.h"
#include "arch/arm/instruction_set_features_arm.h"
+#include "art_method.h"
#include "code_generator_arm.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "intrinsics.h"
#include "mirror/array-inl.h"
-#include "mirror/art_method.h"
#include "mirror/string.h"
#include "thread.h"
#include "utils/arm/assembler_arm.h"
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 2c4fab0..957373f 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -17,12 +17,12 @@
#include "intrinsics_arm64.h"
#include "arch/arm64/instruction_set_features_arm64.h"
+#include "art_method.h"
#include "code_generator_arm64.h"
#include "common_arm64.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "intrinsics.h"
#include "mirror/array-inl.h"
-#include "mirror/art_method.h"
#include "mirror/string.h"
#include "thread.h"
#include "utils/arm64/assembler_arm64.h"
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 28b7a07..989dd0d 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -19,11 +19,11 @@
#include <limits>
#include "arch/x86/instruction_set_features_x86.h"
+#include "art_method.h"
#include "code_generator_x86.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "intrinsics.h"
#include "mirror/array-inl.h"
-#include "mirror/art_method.h"
#include "mirror/string.h"
#include "thread.h"
#include "utils/x86/assembler_x86.h"
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 0efa714..c245cb6 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -19,11 +19,11 @@
#include <limits>
#include "arch/x86_64/instruction_set_features_x86_64.h"
+#include "art_method-inl.h"
#include "code_generator_x86_64.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "intrinsics.h"
#include "mirror/array-inl.h"
-#include "mirror/art_method.h"
#include "mirror/string.h"
#include "thread.h"
#include "utils/x86_64/assembler_x86_64.h"
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 77b587e..ef60d76 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -120,6 +120,7 @@ class HGraph : public ArenaObject<kArenaAllocMisc> {
HGraph(ArenaAllocator* arena,
const DexFile& dex_file,
uint32_t method_idx,
+ InstructionSet instruction_set,
bool debuggable = false,
int start_instruction_id = 0)
: arena_(arena),
@@ -137,6 +138,7 @@ class HGraph : public ArenaObject<kArenaAllocMisc> {
current_instruction_id_(start_instruction_id),
dex_file_(dex_file),
method_idx_(method_idx),
+ instruction_set_(instruction_set),
cached_null_constant_(nullptr),
cached_int_constants_(std::less<int32_t>(), arena->Adapter()),
cached_float_constants_(std::less<int32_t>(), arena->Adapter()),
@@ -359,6 +361,8 @@ class HGraph : public ArenaObject<kArenaAllocMisc> {
// The method index in the dex file.
const uint32_t method_idx_;
+ const InstructionSet instruction_set_;
+
// Cached constants.
HNullConstant* cached_null_constant_;
ArenaSafeMap<int32_t, HIntConstant*> cached_int_constants_;
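HGraph now records the target InstructionSet at construction, so later passes (notably the register allocator's slot reservation further down) can ask the graph for target facts instead of plumbing the compiler driver through. A tiny self-contained analog (all names assumed):

#include <cstddef>

enum class IsaSketch { kArm, kArm64, kX86, kX86_64, kMips, kMips64 };

constexpr size_t PointerSizeSketch(IsaSketch isa) {
  return (isa == IsaSketch::kArm64 || isa == IsaSketch::kX86_64 ||
          isa == IsaSketch::kMips64) ? 8u : 4u;
}

class GraphSketch {
 public:
  explicit GraphSketch(IsaSketch isa) : instruction_set_(isa) {}
  IsaSketch GetInstructionSet() const { return instruction_set_; }
 private:
  const IsaSketch instruction_set_;
};

static_assert(PointerSizeSketch(IsaSketch::kX86_64) == 8u, "64-bit target");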
diff --git a/compiler/optimizing/optimizing_cfi_test.cc b/compiler/optimizing/optimizing_cfi_test.cc
index 7aea249..b0d1433 100644
--- a/compiler/optimizing/optimizing_cfi_test.cc
+++ b/compiler/optimizing/optimizing_cfi_test.cc
@@ -31,7 +31,7 @@ namespace art {
// Run the tests only on host.
#ifndef HAVE_ANDROID_OS
-class OptimizingCFITest : public CFITest {
+class OptimizingCFITest : public CFITest {
public:
// Enable this flag to generate the expected outputs.
static constexpr bool kGenerateExpected = false;
diff --git a/compiler/optimizing/optimizing_cfi_test_expected.inc b/compiler/optimizing/optimizing_cfi_test_expected.inc
index 2125f6e..9ccc011 100644
--- a/compiler/optimizing/optimizing_cfi_test_expected.inc
+++ b/compiler/optimizing/optimizing_cfi_test_expected.inc
@@ -32,7 +32,7 @@ static constexpr uint8_t expected_cfi_kThumb2[] = {
// 0x00000012: .cfi_def_cfa_offset: 64
static constexpr uint8_t expected_asm_kArm64[] = {
- 0xE0, 0x0F, 0x1C, 0xB8, 0xF3, 0xD3, 0x02, 0xA9, 0xFE, 0x1F, 0x00, 0xF9,
+ 0xE0, 0x0F, 0x1C, 0xF8, 0xF3, 0xD3, 0x02, 0xA9, 0xFE, 0x1F, 0x00, 0xF9,
0xE8, 0xA7, 0x01, 0x6D, 0xE8, 0xA7, 0x41, 0x6D, 0xF3, 0xD3, 0x42, 0xA9,
0xFE, 0x1F, 0x40, 0xF9, 0xFF, 0x03, 0x01, 0x91, 0xC0, 0x03, 0x5F, 0xD6,
};
@@ -41,7 +41,7 @@ static constexpr uint8_t expected_cfi_kArm64[] = {
0x05, 0x48, 0x0A, 0x05, 0x49, 0x08, 0x0A, 0x44, 0x06, 0x48, 0x06, 0x49,
0x44, 0xD3, 0xD4, 0x44, 0xDE, 0x44, 0x0E, 0x00, 0x44, 0x0B, 0x0E, 0x40,
};
-// 0x00000000: str w0, [sp, #-64]!
+// 0x00000000: str x0, [sp, #-64]!
// 0x00000004: .cfi_def_cfa_offset: 64
// 0x00000004: stp x19, x20, [sp, #40]
// 0x00000008: .cfi_offset: r19 at cfa-24
@@ -99,13 +99,13 @@ static constexpr uint8_t expected_cfi_kX86[] = {
static constexpr uint8_t expected_asm_kX86_64[] = {
0x55, 0x53, 0x48, 0x83, 0xEC, 0x28, 0xF2, 0x44, 0x0F, 0x11, 0x6C, 0x24,
- 0x20, 0xF2, 0x44, 0x0F, 0x11, 0x64, 0x24, 0x18, 0x89, 0x3C, 0x24, 0xF2,
- 0x44, 0x0F, 0x10, 0x64, 0x24, 0x18, 0xF2, 0x44, 0x0F, 0x10, 0x6C, 0x24,
- 0x20, 0x48, 0x83, 0xC4, 0x28, 0x5B, 0x5D, 0xC3,
+ 0x20, 0xF2, 0x44, 0x0F, 0x11, 0x64, 0x24, 0x18, 0x48, 0x89, 0x3C, 0x24,
+ 0xF2, 0x44, 0x0F, 0x10, 0x64, 0x24, 0x18, 0xF2, 0x44, 0x0F, 0x10, 0x6C,
+ 0x24, 0x20, 0x48, 0x83, 0xC4, 0x28, 0x5B, 0x5D, 0xC3,
};
static constexpr uint8_t expected_cfi_kX86_64[] = {
0x41, 0x0E, 0x10, 0x86, 0x04, 0x41, 0x0E, 0x18, 0x83, 0x06, 0x44, 0x0E,
- 0x40, 0x47, 0x9E, 0x08, 0x47, 0x9D, 0x0A, 0x43, 0x0A, 0x47, 0xDD, 0x47,
+ 0x40, 0x47, 0x9E, 0x08, 0x47, 0x9D, 0x0A, 0x44, 0x0A, 0x47, 0xDD, 0x47,
0xDE, 0x44, 0x0E, 0x18, 0x41, 0x0E, 0x10, 0xC3, 0x41, 0x0E, 0x08, 0xC6,
0x41, 0x0B, 0x0E, 0x40,
};
@@ -121,21 +121,20 @@ static constexpr uint8_t expected_cfi_kX86_64[] = {
// 0x0000000d: .cfi_offset: r30 at cfa-32
// 0x0000000d: movsd [rsp + 24], xmm12
// 0x00000014: .cfi_offset: r29 at cfa-40
-// 0x00000014: mov [rsp], edi
-// 0x00000017: .cfi_remember_state
-// 0x00000017: movsd xmm12, [rsp + 24]
-// 0x0000001e: .cfi_restore: r29
-// 0x0000001e: movsd xmm13, [rsp + 32]
-// 0x00000025: .cfi_restore: r30
-// 0x00000025: addq rsp, 40
-// 0x00000029: .cfi_def_cfa_offset: 24
-// 0x00000029: pop rbx
-// 0x0000002a: .cfi_def_cfa_offset: 16
-// 0x0000002a: .cfi_restore: r3
-// 0x0000002a: pop rbp
-// 0x0000002b: .cfi_def_cfa_offset: 8
-// 0x0000002b: .cfi_restore: r6
-// 0x0000002b: ret
-// 0x0000002c: .cfi_restore_state
-// 0x0000002c: .cfi_def_cfa_offset: 64
-
+// 0x00000014: movq [rsp], rdi
+// 0x00000018: .cfi_remember_state
+// 0x00000018: movsd xmm12, [rsp + 24]
+// 0x0000001f: .cfi_restore: r29
+// 0x0000001f: movsd xmm13, [rsp + 32]
+// 0x00000026: .cfi_restore: r30
+// 0x00000026: addq rsp, 40
+// 0x0000002a: .cfi_def_cfa_offset: 24
+// 0x0000002a: pop rbx
+// 0x0000002b: .cfi_def_cfa_offset: 16
+// 0x0000002b: .cfi_restore: r3
+// 0x0000002b: pop rbp
+// 0x0000002c: .cfi_def_cfa_offset: 8
+// 0x0000002c: .cfi_restore: r6
+// 0x0000002c: ret
+// 0x0000002d: .cfi_restore_state
+// 0x0000002d: .cfi_def_cfa_offset: 64
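The expected-output edits above follow directly from widening the method store. On arm64, str w0 becomes str x0 (the first instruction word's opcode byte changes 0xB8 to 0xF8). On x86-64 the store grows by one REX.W prefix byte, shifting every later code offset by one:

//   89 3C 24      mov [rsp], edi   (old 32-bit store of the method reference)
//   48 89 3C 24   mov [rsp], rdi   (REX.W widens the same store to 64 bits)
// DWARF CFI advances count code bytes, so opcode 0x43 (DW_CFA_advance_loc by 3)
// in expected_cfi_kX86_64 becomes 0x44 (advance by 4).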
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 8bb5d8e..c7b2c67 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -19,6 +19,7 @@
#include <fstream>
#include <stdint.h>
+#include "art_method-inl.h"
#include "base/arena_allocator.h"
#include "base/dumpable.h"
#include "base/timing_logger.h"
@@ -44,7 +45,6 @@
#include "intrinsics.h"
#include "licm.h"
#include "jni/quick/jni_compiler.h"
-#include "mirror/art_method-inl.h"
#include "nodes.h"
#include "prepare_for_register_allocation.h"
#include "reference_type_propagation.h"
@@ -196,7 +196,7 @@ class OptimizingCompiler FINAL : public Compiler {
return ArtQuickJniCompileMethod(GetCompilerDriver(), access_flags, method_idx, dex_file);
}
- uintptr_t GetEntryPointOf(mirror::ArtMethod* method) const OVERRIDE
+ uintptr_t GetEntryPointOf(ArtMethod* method) const OVERRIDE
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCodePtrSize(
InstructionSetPointerSize(GetCompilerDriver()->GetInstructionSet())));
@@ -514,7 +514,8 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite
ArenaAllocator arena(Runtime::Current()->GetArenaPool());
HGraph* graph = new (&arena) HGraph(
- &arena, dex_file, method_idx, compiler_driver->GetCompilerOptions().GetDebuggable());
+ &arena, dex_file, method_idx, compiler_driver->GetInstructionSet(),
+ compiler_driver->GetCompilerOptions().GetDebuggable());
// For testing purposes, we put a special marker on method names that should be compiled
// with this compiler. This makes sure we're not regressing.
diff --git a/compiler/optimizing/optimizing_unit_test.h b/compiler/optimizing/optimizing_unit_test.h
index 4f8ec65..3ef96fa 100644
--- a/compiler/optimizing/optimizing_unit_test.h
+++ b/compiler/optimizing/optimizing_unit_test.h
@@ -74,7 +74,8 @@ void RemoveSuspendChecks(HGraph* graph) {
inline HGraph* CreateGraph(ArenaAllocator* allocator) {
return new (allocator) HGraph(
- allocator, *reinterpret_cast<DexFile*>(allocator->Alloc(sizeof(DexFile))), -1);
+ allocator, *reinterpret_cast<DexFile*>(allocator->Alloc(sizeof(DexFile))), -1, kRuntimeISA,
+ false);
}
// Create a control-flow graph from Dex instructions.
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index 12b1c2b..e93e061 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -16,7 +16,7 @@
#include "reference_type_propagation.h"
-#include "class_linker.h"
+#include "class_linker-inl.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache.h"
#include "scoped_thread_state_change.h"
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index f53f846..5f439c8 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -71,7 +71,9 @@ RegisterAllocator::RegisterAllocator(ArenaAllocator* allocator,
physical_fp_register_intervals_.SetSize(codegen->GetNumberOfFloatingPointRegisters());
// Always reserve for the current method and the graph's max out registers.
// TODO: compute it instead.
- reserved_out_slots_ = 1 + codegen->GetGraph()->GetMaximumNumberOfOutVRegs();
+ // ArtMethod* takes 2 vregs for 64 bits.
+ reserved_out_slots_ = InstructionSetPointerSize(codegen->GetInstructionSet()) / kVRegSize +
+ codegen->GetGraph()->GetMaximumNumberOfOutVRegs();
}
bool RegisterAllocator::CanAllocateRegistersFor(const HGraph& graph ATTRIBUTE_UNUSED,
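Working the reservation above through both pointer widths (kVRegSize is 4 bytes):

//   arm64 / x86-64 / mips64: 8 / 4 = 2 slots reserved for ArtMethod*
//   arm   / x86    / mips  : 4 / 4 = 1 slot, matching the old "1 +" constant
// plus the graph's maximum number of out vregs in either case.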
diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc
index 1da0563..cbbc116 100644
--- a/compiler/utils/arm/assembler_arm.cc
+++ b/compiler/utils/arm/assembler_arm.cc
@@ -378,7 +378,7 @@ static dwarf::Reg DWARFReg(SRegister reg) {
return dwarf::Reg::ArmFp(static_cast<int>(reg));
}
-constexpr size_t kFramePointerSize = 4;
+constexpr size_t kFramePointerSize = kArmPointerSize;
void ArmAssembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
const std::vector<ManagedRegister>& callee_save_regs,
@@ -415,7 +415,7 @@ void ArmAssembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
StoreToOffset(kStoreWord, R0, SP, 0);
// Write out entry spills.
- int32_t offset = frame_size + sizeof(StackReference<mirror::ArtMethod>);
+ int32_t offset = frame_size + kFramePointerSize;
for (size_t i = 0; i < entry_spills.size(); ++i) {
ArmManagedRegister reg = entry_spills.at(i).AsArm();
if (reg.IsNoRegister()) {
@@ -528,13 +528,13 @@ void ArmAssembler::CopyRef(FrameOffset dest, FrameOffset src,
StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
}
-void ArmAssembler::LoadRef(ManagedRegister mdest, ManagedRegister base,
- MemberOffset offs) {
+void ArmAssembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs,
+ bool poison_reference) {
ArmManagedRegister dst = mdest.AsArm();
CHECK(dst.IsCoreRegister() && base.AsArm().IsCoreRegister()) << dst;
LoadFromOffset(kLoadWord, dst.AsCoreRegister(),
base.AsArm().AsCoreRegister(), offs.Int32Value());
- if (kPoisonHeapReferences) {
+ if (kPoisonHeapReferences && poison_reference) {
rsb(dst.AsCoreRegister(), dst.AsCoreRegister(), ShifterOperand(0));
}
}
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
index ce4c741..c673c6b 100644
--- a/compiler/utils/arm/assembler_arm.h
+++ b/compiler/utils/arm/assembler_arm.h
@@ -693,9 +693,10 @@ class ArmAssembler : public Assembler {
void LoadFromThread32(ManagedRegister dest, ThreadOffset<4> src, size_t size) OVERRIDE;
- void LoadRef(ManagedRegister dest, FrameOffset  src) OVERRIDE;
+ void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
- void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs) OVERRIDE;
+ void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs,
+ bool poison_reference) OVERRIDE;
void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index 3ee79a1..7d98a30 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -293,14 +293,14 @@ void Arm64Assembler::LoadRef(ManagedRegister m_dst, FrameOffset offs) {
LoadWFromOffset(kLoadWord, dst.AsOverlappingWRegister(), SP, offs.Int32Value());
}
-void Arm64Assembler::LoadRef(ManagedRegister m_dst, ManagedRegister m_base,
- MemberOffset offs) {
+void Arm64Assembler::LoadRef(ManagedRegister m_dst, ManagedRegister m_base, MemberOffset offs,
+ bool poison_reference) {
Arm64ManagedRegister dst = m_dst.AsArm64();
Arm64ManagedRegister base = m_base.AsArm64();
CHECK(dst.IsXRegister() && base.IsXRegister());
LoadWFromOffset(kLoadWord, dst.AsOverlappingWRegister(), base.AsXRegister(),
offs.Int32Value());
- if (kPoisonHeapReferences) {
+ if (kPoisonHeapReferences && poison_reference) {
WRegister ref_reg = dst.AsOverlappingWRegister();
___ Neg(reg_w(ref_reg), vixl::Operand(reg_w(ref_reg)));
}
@@ -535,7 +535,7 @@ void Arm64Assembler::Call(FrameOffset base, Offset offs, ManagedRegister m_scrat
Arm64ManagedRegister scratch = m_scratch.AsArm64();
CHECK(scratch.IsXRegister()) << scratch;
// Call *(*(SP + base) + offset)
- LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(), SP, base.Int32Value());
+ LoadFromOffset(scratch.AsXRegister(), SP, base.Int32Value());
LoadFromOffset(scratch.AsXRegister(), scratch.AsXRegister(), offs.Int32Value());
___ Blr(reg_x(scratch.AsXRegister()));
}
@@ -544,8 +544,9 @@ void Arm64Assembler::CallFromThread64(ThreadOffset<8> /*offset*/, ManagedRegiste
UNIMPLEMENTED(FATAL) << "Unimplemented Call() variant";
}
-void Arm64Assembler::CreateHandleScopeEntry(ManagedRegister m_out_reg, FrameOffset handle_scope_offs,
- ManagedRegister m_in_reg, bool null_allowed) {
+void Arm64Assembler::CreateHandleScopeEntry(
+ ManagedRegister m_out_reg, FrameOffset handle_scope_offs, ManagedRegister m_in_reg,
+ bool null_allowed) {
Arm64ManagedRegister out_reg = m_out_reg.AsArm64();
Arm64ManagedRegister in_reg = m_in_reg.AsArm64();
// For now we only hold stale handle scope entries in x registers.
@@ -571,7 +572,7 @@ void Arm64Assembler::CreateHandleScopeEntry(ManagedRegister m_out_reg, FrameOffs
}
void Arm64Assembler::CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handle_scope_offset,
- ManagedRegister m_scratch, bool null_allowed) {
+ ManagedRegister m_scratch, bool null_allowed) {
Arm64ManagedRegister scratch = m_scratch.AsArm64();
CHECK(scratch.IsXRegister()) << scratch;
if (null_allowed) {
@@ -590,7 +591,7 @@ void Arm64Assembler::CreateHandleScopeEntry(FrameOffset out_off, FrameOffset han
}
void Arm64Assembler::LoadReferenceFromHandleScope(ManagedRegister m_out_reg,
- ManagedRegister m_in_reg) {
+ ManagedRegister m_in_reg) {
Arm64ManagedRegister out_reg = m_out_reg.AsArm64();
Arm64ManagedRegister in_reg = m_in_reg.AsArm64();
CHECK(out_reg.IsXRegister()) << out_reg;
@@ -706,7 +707,7 @@ void Arm64Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
// Increase frame to required size.
DCHECK_ALIGNED(frame_size, kStackAlignment);
- DCHECK_GE(frame_size, core_reg_size + fp_reg_size + sizeof(StackReference<mirror::ArtMethod>));
+ DCHECK_GE(frame_size, core_reg_size + fp_reg_size + kArm64PointerSize);
IncreaseFrameSize(frame_size);
// Save callee-saves.
@@ -720,13 +721,12 @@ void Arm64Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
DCHECK(core_reg_list.IncludesAliasOf(reg_x(ETR)));
___ Mov(reg_x(ETR), reg_x(TR));
- // Write StackReference<Method>.
+ // Write ArtMethod*
DCHECK(X0 == method_reg.AsArm64().AsXRegister());
- DCHECK_EQ(4U, sizeof(StackReference<mirror::ArtMethod>));
- StoreWToOffset(StoreOperandType::kStoreWord, W0, SP, 0);
+ StoreToOffset(X0, SP, 0);
// Write out entry spills
- int32_t offset = frame_size + sizeof(StackReference<mirror::ArtMethod>);
+ int32_t offset = frame_size + kArm64PointerSize;
for (size_t i = 0; i < entry_spills.size(); ++i) {
Arm64ManagedRegister reg = entry_spills.at(i).AsArm64();
if (reg.IsNoRegister()) {
@@ -768,7 +768,7 @@ void Arm64Assembler::RemoveFrame(size_t frame_size,
// For now we only check that the size of the frame is large enough to hold spills and method
// reference.
- DCHECK_GE(frame_size, core_reg_size + fp_reg_size + sizeof(StackReference<mirror::ArtMethod>));
+ DCHECK_GE(frame_size, core_reg_size + fp_reg_size + kArm64PointerSize);
DCHECK_ALIGNED(frame_size, kStackAlignment);
// Note: This is specific to JNI method frame.
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index b1b66ed..fa9faed 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -113,8 +113,9 @@ class Arm64Assembler FINAL : public Assembler {
// Load routines.
void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
void LoadFromThread64(ManagedRegister dest, ThreadOffset<8> src, size_t size) OVERRIDE;
- void LoadRef(ManagedRegister dest, FrameOffset  src) OVERRIDE;
- void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs) OVERRIDE;
+ void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
+ void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs,
+ bool poison_reference) OVERRIDE;
void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
void LoadRawPtrFromThread64(ManagedRegister dest, ThreadOffset<8> offs) OVERRIDE;
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index 2e3a47b..672e150 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -434,8 +434,10 @@ class Assembler {
virtual void LoadFromThread32(ManagedRegister dest, ThreadOffset<4> src, size_t size);
virtual void LoadFromThread64(ManagedRegister dest, ThreadOffset<8> src, size_t size);
- virtual void LoadRef(ManagedRegister dest, FrameOffset  src) = 0;
- virtual void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs) = 0;
+ virtual void LoadRef(ManagedRegister dest, FrameOffset src) = 0;
+ // If poison_reference is true and kPoisonHeapReferences is true, then we negate the read reference.
+ virtual void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs,
+ bool poison_reference) = 0;
virtual void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) = 0;
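The new poison_reference flag lets one helper serve two kinds of loads: heap references, which may be stored poisoned (negated) and must be unpoisoned on read, and plain pointer-sized data such as an ArtMethod* field, which must be left untouched. A behavioral sketch under that assumption:

#include <cstdint>

constexpr bool kPoisonHeapReferencesSketch = true;  // compile-time switch in ART

inline uint32_t LoadRefSketch(uint32_t stored_bits, bool poison_reference) {
  if (kPoisonHeapReferencesSketch && poison_reference) {
    // Heap references are stored negated; undo it on load. This mirrors the
    // rsb / Neg / Subu / negl instructions in the per-arch implementations.
    return 0u - stored_bits;
  }
  return stored_bits;  // e.g. a raw ArtMethod*: never poisoned
}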
diff --git a/compiler/utils/dex_cache_arrays_layout-inl.h b/compiler/utils/dex_cache_arrays_layout-inl.h
index a71eece..fec981a 100644
--- a/compiler/utils/dex_cache_arrays_layout-inl.h
+++ b/compiler/utils/dex_cache_arrays_layout-inl.h
@@ -25,12 +25,6 @@
#include "mirror/array-inl.h"
#include "primitive.h"
-namespace mirror {
-class ArtMethod;
-class Class;
-class String;
-} // namespace mirror
-
namespace art {
inline DexCacheArraysLayout::DexCacheArraysLayout(size_t pointer_size, const DexFile* dex_file)
@@ -40,7 +34,7 @@ inline DexCacheArraysLayout::DexCacheArraysLayout(size_t pointer_size, const Dex
strings_offset_(methods_offset_ + MethodsSize(dex_file->NumMethodIds())),
fields_offset_(strings_offset_ + StringsSize(dex_file->NumStringIds())),
size_(fields_offset_ + FieldsSize(dex_file->NumFieldIds())) {
- DCHECK(pointer_size == 4u || pointer_size == 8u);
+ DCHECK(ValidPointerSize(pointer_size)) << pointer_size;
}
inline size_t DexCacheArraysLayout::TypeOffset(uint32_t type_idx) const {
@@ -52,12 +46,11 @@ inline size_t DexCacheArraysLayout::TypesSize(size_t num_elements) const {
}
inline size_t DexCacheArraysLayout::MethodOffset(uint32_t method_idx) const {
- return methods_offset_ + ElementOffset(
- sizeof(mirror::HeapReference<mirror::ArtMethod>), method_idx);
+ return methods_offset_ + ElementOffset(pointer_size_, method_idx);
}
inline size_t DexCacheArraysLayout::MethodsSize(size_t num_elements) const {
- return ArraySize(sizeof(mirror::HeapReference<mirror::ArtMethod>), num_elements);
+ return ArraySize(pointer_size_, num_elements);
}
inline size_t DexCacheArraysLayout::StringOffset(uint32_t string_idx) const {
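With methods out of the managed heap, the dex-cache method array becomes an array of raw ArtMethod*, so its element stride is the target pointer size instead of sizeof(mirror::HeapReference<mirror::ArtMethod>) == 4. A self-contained analog of the size math (helper names assumed, alignment padding ignored):

#include <cstddef>

constexpr size_t MethodsSizeSketch(size_t num_method_ids, size_t pointer_size) {
  return num_method_ids * pointer_size;
}

static_assert(MethodsSizeSketch(100, 8) == 800, "64-bit target: array doubles");
static_assert(MethodsSizeSketch(100, 4) == 400, "32-bit target: size unchanged");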
diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
index e769489..e55b461 100644
--- a/compiler/utils/mips/assembler_mips.cc
+++ b/compiler/utils/mips/assembler_mips.cc
@@ -696,13 +696,13 @@ void MipsAssembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
LoadFromOffset(kLoadWord, dest.AsCoreRegister(), SP, src.Int32Value());
}
-void MipsAssembler::LoadRef(ManagedRegister mdest, ManagedRegister base,
- MemberOffset offs) {
+void MipsAssembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs,
+ bool poison_reference) {
MipsManagedRegister dest = mdest.AsMips();
CHECK(dest.IsCoreRegister() && base.AsMips().IsCoreRegister());
LoadFromOffset(kLoadWord, dest.AsCoreRegister(),
base.AsMips().AsCoreRegister(), offs.Int32Value());
- if (kPoisonHeapReferences) {
+ if (kPoisonHeapReferences && poison_reference) {
Subu(dest.AsCoreRegister(), ZERO, dest.AsCoreRegister());
}
}
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index 34713e1..7b0fc39 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -189,9 +189,10 @@ class MipsAssembler FINAL : public Assembler {
void LoadFromThread32(ManagedRegister mdest, ThreadOffset<4> src, size_t size) OVERRIDE;
- void LoadRef(ManagedRegister dest, FrameOffset  src) OVERRIDE;
+ void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
- void LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs) OVERRIDE;
+ void LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs,
+ bool poison_reference) OVERRIDE;
void LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offset offs) OVERRIDE;
diff --git a/compiler/utils/mips64/assembler_mips64.cc b/compiler/utils/mips64/assembler_mips64.cc
index b95e436..a8b55d1 100644
--- a/compiler/utils/mips64/assembler_mips64.cc
+++ b/compiler/utils/mips64/assembler_mips64.cc
@@ -601,10 +601,10 @@ void Mips64Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
}
// Write out Method*.
- StoreToOffset(kStoreWord, method_reg.AsMips64().AsGpuRegister(), SP, 0);
+ StoreToOffset(kStoreDoubleword, method_reg.AsMips64().AsGpuRegister(), SP, 0);
// Write out entry spills.
- int32_t offset = frame_size + sizeof(StackReference<mirror::ArtMethod>);
+ int32_t offset = frame_size + kFramePointerSize;
for (size_t i = 0; i < entry_spills.size(); ++i) {
Mips64ManagedRegister reg = entry_spills.at(i).AsMips64();
ManagedRegisterSpill spill = entry_spills.at(i);
@@ -750,12 +750,13 @@ void Mips64Assembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
LoadFromOffset(kLoadUnsignedWord, dest.AsGpuRegister(), SP, src.Int32Value());
}
-void Mips64Assembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs) {
+void Mips64Assembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs,
+ bool poison_reference) {
Mips64ManagedRegister dest = mdest.AsMips64();
CHECK(dest.IsGpuRegister() && base.AsMips64().IsGpuRegister());
LoadFromOffset(kLoadUnsignedWord, dest.AsGpuRegister(),
base.AsMips64().AsGpuRegister(), offs.Int32Value());
- if (kPoisonHeapReferences) {
+ if (kPoisonHeapReferences && poison_reference) {
Subu(dest.AsGpuRegister(), ZERO, dest.AsGpuRegister());
}
}
@@ -1004,7 +1005,7 @@ void Mips64Assembler::Call(FrameOffset base, Offset offset, ManagedRegister mscr
Mips64ManagedRegister scratch = mscratch.AsMips64();
CHECK(scratch.IsGpuRegister()) << scratch;
// Call *(*(SP + base) + offset)
- LoadFromOffset(kLoadUnsignedWord, scratch.AsGpuRegister(),
+ LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(),
SP, base.Int32Value());
LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(),
scratch.AsGpuRegister(), offset.Int32Value());
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index 95ba967..38419ab 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -188,9 +188,10 @@ class Mips64Assembler FINAL : public Assembler {
void LoadFromThread64(ManagedRegister mdest, ThreadOffset<8> src, size_t size) OVERRIDE;
- void LoadRef(ManagedRegister dest, FrameOffset  src) OVERRIDE;
+ void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
- void LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs) OVERRIDE;
+ void LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs,
+ bool poison_reference) OVERRIDE;
void LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offset offs) OVERRIDE;
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index 7e75200..390d46e 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -1724,9 +1724,9 @@ void X86Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
}
// return address then method on stack.
- int32_t adjust = frame_size - (gpr_count * kFramePointerSize) -
- sizeof(StackReference<mirror::ArtMethod>) /*method*/ -
- kFramePointerSize /*return address*/;
+ int32_t adjust = frame_size - gpr_count * kFramePointerSize -
+ kFramePointerSize /*method*/ -
+ kFramePointerSize /*return address*/;
addl(ESP, Immediate(-adjust));
cfi_.AdjustCFAOffset(adjust);
pushl(method_reg.AsX86().AsCpuRegister());
@@ -1750,12 +1750,11 @@ void X86Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
}
}
-void X86Assembler::RemoveFrame(size_t frame_size,
- const std::vector<ManagedRegister>& spill_regs) {
+void X86Assembler::RemoveFrame(size_t frame_size, const std::vector<ManagedRegister>& spill_regs) {
CHECK_ALIGNED(frame_size, kStackAlignment);
cfi_.RememberState();
- int adjust = frame_size - (spill_regs.size() * kFramePointerSize) -
- sizeof(StackReference<mirror::ArtMethod>);
+ // -kFramePointerSize for ArtMethod*.
+ int adjust = frame_size - spill_regs.size() * kFramePointerSize - kFramePointerSize;
addl(ESP, Immediate(adjust));
cfi_.AdjustCFAOffset(-adjust);
for (size_t i = 0; i < spill_regs.size(); ++i) {
@@ -1904,18 +1903,18 @@ void X86Assembler::LoadFromThread32(ManagedRegister mdest, ThreadOffset<4> src,
}
}
-void X86Assembler::LoadRef(ManagedRegister mdest, FrameOffset  src) {
+void X86Assembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
X86ManagedRegister dest = mdest.AsX86();
CHECK(dest.IsCpuRegister());
movl(dest.AsCpuRegister(), Address(ESP, src));
}
-void X86Assembler::LoadRef(ManagedRegister mdest, ManagedRegister base,
- MemberOffset offs) {
+void X86Assembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs,
+ bool poison_reference) {
X86ManagedRegister dest = mdest.AsX86();
CHECK(dest.IsCpuRegister() && base.AsX86().IsCpuRegister());
movl(dest.AsCpuRegister(), Address(base.AsX86().AsCpuRegister(), offs));
- if (kPoisonHeapReferences) {
+ if (kPoisonHeapReferences && poison_reference) {
negl(dest.AsCpuRegister());
}
}
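Plugging illustrative numbers into the rewritten x86 frame adjustment:

//   frame_size = 32, gpr_count = 2, kFramePointerSize = 4 (x86)
//   adjust = 32 - 2*4 - 4 /*method*/ - 4 /*return address*/ = 16
// On 32-bit targets kFramePointerSize equals the old
// sizeof(StackReference<mirror::ArtMethod>), so only the expression changes,
// not the emitted frame layout.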
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index 5319dac..1c1c023 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -538,9 +538,10 @@ class X86Assembler FINAL : public Assembler {
void LoadFromThread32(ManagedRegister dest, ThreadOffset<4> src, size_t size) OVERRIDE;
- void LoadRef(ManagedRegister dest, FrameOffset  src) OVERRIDE;
+ void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
- void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs) OVERRIDE;
+ void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs,
+ bool poison_reference) OVERRIDE;
void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index feceeca..ac95c71 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -2388,9 +2388,9 @@ void X86_64Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
}
}
- DCHECK_EQ(4U, sizeof(StackReference<mirror::ArtMethod>));
+ DCHECK_EQ(kX86_64PointerSize, kFramePointerSize);
- movl(Address(CpuRegister(RSP), 0), method_reg.AsX86_64().AsCpuRegister());
+ movq(Address(CpuRegister(RSP), 0), method_reg.AsX86_64().AsCpuRegister());
for (size_t i = 0; i < entry_spills.size(); ++i) {
ManagedRegisterSpill spill = entry_spills.at(i);
@@ -2590,18 +2590,18 @@ void X86_64Assembler::LoadFromThread64(ManagedRegister mdest, ThreadOffset<8> sr
}
}
-void X86_64Assembler::LoadRef(ManagedRegister mdest, FrameOffset  src) {
+void X86_64Assembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
X86_64ManagedRegister dest = mdest.AsX86_64();
CHECK(dest.IsCpuRegister());
movq(dest.AsCpuRegister(), Address(CpuRegister(RSP), src));
}
-void X86_64Assembler::LoadRef(ManagedRegister mdest, ManagedRegister base,
- MemberOffset offs) {
+void X86_64Assembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs,
+ bool poison_reference) {
X86_64ManagedRegister dest = mdest.AsX86_64();
CHECK(dest.IsCpuRegister() && base.AsX86_64().IsCpuRegister());
movl(dest.AsCpuRegister(), Address(base.AsX86_64().AsCpuRegister(), offs));
- if (kPoisonHeapReferences) {
+ if (kPoisonHeapReferences && poison_reference) {
negl(dest.AsCpuRegister());
}
}
@@ -2667,8 +2667,7 @@ void X86_64Assembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t s
}
}
-void X86_64Assembler::CopyRef(FrameOffset dest, FrameOffset src,
- ManagedRegister mscratch) {
+void X86_64Assembler::CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) {
X86_64ManagedRegister scratch = mscratch.AsX86_64();
CHECK(scratch.IsCpuRegister());
movl(scratch.AsCpuRegister(), Address(CpuRegister(RSP), src));
@@ -2693,9 +2692,8 @@ void X86_64Assembler::CopyRawPtrToThread64(ThreadOffset<8> thr_offs,
gs()->movq(Address::Absolute(thr_offs, true), scratch.AsCpuRegister());
}
-void X86_64Assembler::Copy(FrameOffset dest, FrameOffset src,
- ManagedRegister mscratch,
- size_t size) {
+void X86_64Assembler::Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch,
+ size_t size) {
X86_64ManagedRegister scratch = mscratch.AsX86_64();
if (scratch.IsCpuRegister() && size == 8) {
Load(scratch, src, 4);
@@ -2834,7 +2832,7 @@ void X86_64Assembler::Call(ManagedRegister mbase, Offset offset, ManagedRegister
void X86_64Assembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) {
CpuRegister scratch = mscratch.AsX86_64().AsCpuRegister();
- movl(scratch, Address(CpuRegister(RSP), base));
+ movq(scratch, Address(CpuRegister(RSP), base));
call(Address(scratch, offset));
}
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 7daf994..6b2b65d 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -668,7 +668,8 @@ class X86_64Assembler FINAL : public Assembler {
void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
- void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs) OVERRIDE;
+ void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs,
+ bool poison_reference) OVERRIDE;
void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index dcffe35..b86bc85 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -1127,7 +1127,7 @@ std::string buildframe_test_fn(AssemblerX86_64Test::Base* assembler_test ATTRIBU
ssize_t displacement = static_cast<ssize_t>(frame_size) - (spill_regs.size() * 8 + 8);
str << "subq $" << displacement << ", %rsp\n";
// 3) Store method reference.
- str << "movl %edi, (%rsp)\n";
+ str << "movq %rdi, (%rsp)\n";
// 4) Entry spills.
str << "movq %rax, " << frame_size + 0 << "(%rsp)\n";
str << "movq %rbx, " << frame_size + 8 << "(%rsp)\n";