-rw-r--r--  build/Android.common_build.mk                1
-rw-r--r--  build/Android.gtest.mk                       4
-rw-r--r--  compiler/Android.mk                          6
-rw-r--r--  compiler/oat_writer.cc                       2
-rw-r--r--  compiler/optimizing/code_generator.cc       10
-rw-r--r--  compiler/optimizing/code_generator.h         1
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc  6
-rw-r--r--  compiler/optimizing/code_generator_arm64.h   2
-rw-r--r--  compiler/trampolines/trampoline_compiler.cc  1
-rw-r--r--  compiler/utils/arm64/assembler_arm64.cc     12
-rw-r--r--  disassembler/Android.mk                      6
-rw-r--r--  disassembler/disassembler_arm64.cc           2
12 files changed, 37 insertions(+), 16 deletions(-)
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk
index c6e2b95..b3d1380 100644
--- a/build/Android.common_build.mk
+++ b/build/Android.common_build.mk
@@ -248,6 +248,7 @@ art_non_debug_cflags := \
art_debug_cflags := \
-O2 \
-DDYNAMIC_ANNOTATIONS_ENABLED=1 \
+ -DVIXL_DEBUG \
-UNDEBUG
art_host_non_debug_cflags := $(art_non_debug_cflags)
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 7e28b37..10b0400 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -368,7 +368,7 @@ define define-art-gtest
ifeq ($$(art_target_or_host),target)
$$(eval $$(call set-target-local-clang-vars))
$$(eval $$(call set-target-local-cflags-vars,debug))
- LOCAL_SHARED_LIBRARIES += libdl libicuuc libicui18n libnativehelper libz libcutils libvixl
+ LOCAL_SHARED_LIBRARIES += libdl libicuuc libicui18n libnativehelper libz libcutils libvixld
LOCAL_MODULE_PATH_32 := $$(ART_TARGET_NATIVETEST_OUT)/$$(ART_TARGET_ARCH_32)
LOCAL_MODULE_PATH_64 := $$(ART_TARGET_NATIVETEST_OUT)/$$(ART_TARGET_ARCH_64)
LOCAL_MULTILIB := both
@@ -404,7 +404,7 @@ test-art-target-gtest-$$(art_gtest_name): $$(ART_TEST_TARGET_GTEST_$$(art_gtest_
else # host
LOCAL_CLANG := $$(ART_HOST_CLANG)
LOCAL_CFLAGS += $$(ART_HOST_CFLAGS) $$(ART_HOST_DEBUG_CFLAGS)
- LOCAL_SHARED_LIBRARIES += libicuuc-host libicui18n-host libnativehelper libziparchive-host libz-host libvixl
+ LOCAL_SHARED_LIBRARIES += libicuuc-host libicui18n-host libnativehelper libziparchive-host libz-host libvixld
LOCAL_LDLIBS := $(ART_HOST_LDLIBS) -lpthread -ldl
LOCAL_IS_HOST_MODULE := true
LOCAL_MULTILIB := both
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 84176a1..70c7e52 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -279,7 +279,11 @@ $$(ENUM_OPERATOR_OUT_GEN): $$(GENERATED_SRC_DIR)/%_operator_out.cc : $(LOCAL_PAT
LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_build.mk
LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk
# Vixl assembly support for ARM64 targets.
- LOCAL_SHARED_LIBRARIES += libvixl
+ ifeq ($$(art_ndebug_or_debug),debug)
+ LOCAL_SHARED_LIBRARIES += libvixld
+ else
+ LOCAL_SHARED_LIBRARIES += libvixl
+ endif
ifeq ($$(art_target_or_host),target)
# For atrace.
LOCAL_SHARED_LIBRARIES += libcutils
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index a57f892..8a7abb4 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -366,6 +366,8 @@ class OatWriter::Arm64RelativeCallPatcher FINAL : public ArmBaseRelativeCallPatc
Offset offset(mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
kArm64PointerSize).Int32Value());
assembler.JumpTo(ManagedRegister(arm64::X0), offset, ManagedRegister(arm64::IP0));
+ // Ensure we emit the literal pool.
+ assembler.EmitSlowPaths();
std::vector<uint8_t> thunk_code(assembler.CodeSize());
MemoryRegion code(thunk_code.data(), thunk_code.size());
assembler.FinalizeInstructions(code);
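
Note: the oat_writer.cc hunk above flushes the assembler's pending slow paths (the literal pool behind JumpTo, per the comment in the patch) before the thunk buffer is sized and copied, so the pool is included in CodeSize() and in the copied code. A minimal sketch of that emit-then-copy ordering, reusing only names that appear in the patch (the wrapper function itself is hypothetical):

// Sketch only; the actual emitted instructions and error handling are elided.
std::vector<uint8_t> CompileArm64Thunk(arm64::Arm64Assembler* assembler) {
  // ... emit the jump via assembler->JumpTo(...) as in the patch ...
  assembler->EmitSlowPaths();                               // flush the literal pool first
  std::vector<uint8_t> thunk_code(assembler->CodeSize());   // size is final only now
  MemoryRegion code(thunk_code.data(), thunk_code.size());
  assembler->FinalizeInstructions(code);                    // copy and resolve literal loads
  return thunk_code;
}

The same ordering is applied to the generic trampolines further down (trampoline_compiler.cc).
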
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 6cacd4f..e581af2 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -71,11 +71,7 @@ void CodeGenerator::CompileBaseline(CodeAllocator* allocator, bool is_leaf) {
}
}
GenerateSlowPaths();
-
- size_t code_size = GetAssembler()->CodeSize();
- uint8_t* buffer = allocator->Allocate(code_size);
- MemoryRegion code(buffer, code_size);
- GetAssembler()->FinalizeInstructions(code);
+ Finalize(allocator);
}
void CodeGenerator::CompileOptimized(CodeAllocator* allocator) {
@@ -97,9 +93,13 @@ void CodeGenerator::CompileOptimized(CodeAllocator* allocator) {
}
}
GenerateSlowPaths();
+ Finalize(allocator);
+}
+void CodeGenerator::Finalize(CodeAllocator* allocator) {
size_t code_size = GetAssembler()->CodeSize();
uint8_t* buffer = allocator->Allocate(code_size);
+
MemoryRegion code(buffer, code_size);
GetAssembler()->FinalizeInstructions(code);
}
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 321a31f..4c0d3ea 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -90,6 +90,7 @@ class CodeGenerator : public ArenaObject<kArenaAllocMisc> {
}
virtual void Initialize() = 0;
+ virtual void Finalize(CodeAllocator* allocator);
virtual void GenerateFrameEntry() = 0;
virtual void GenerateFrameExit() = 0;
virtual void Bind(HBasicBlock* block) = 0;
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 0fc4307..8d43a5d 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -479,6 +479,12 @@ CodeGeneratorARM64::CodeGeneratorARM64(HGraph* graph)
#undef __
#define __ GetVIXLAssembler()->
+void CodeGeneratorARM64::Finalize(CodeAllocator* allocator) {
+ // Ensure we emit the literal pool.
+ __ FinalizeCode();
+ CodeGenerator::Finalize(allocator);
+}
+
void CodeGeneratorARM64::GenerateFrameEntry() {
// TODO: Add proper support for the stack overflow check.
UseScratchRegisterScope temps(GetVIXLAssembler());
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index a40f27f..236a04d 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -232,6 +232,8 @@ class CodeGeneratorARM64 : public CodeGenerator {
}
}
+ void Finalize(CodeAllocator* allocator) OVERRIDE;
+
// Code generation helpers.
void MoveConstant(vixl::CPURegister destination, HConstant* constant);
void MoveHelper(Location destination, Location source, Primitive::Type type);
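
Note: the four optimizing-compiler files above factor the duplicated tail of CompileBaseline()/CompileOptimized() into a virtual CodeGenerator::Finalize(CodeAllocator*), which the ARM64 back end overrides so VIXL can emit its pending literal pool before the generic buffer copy runs. A condensed sketch of the resulting flow (names are taken from the diff; unrelated members are elided):

// Base class: allocate the output buffer and copy the finished code into it.
void CodeGenerator::Finalize(CodeAllocator* allocator) {
  size_t code_size = GetAssembler()->CodeSize();
  uint8_t* buffer = allocator->Allocate(code_size);
  MemoryRegion code(buffer, code_size);
  GetAssembler()->FinalizeInstructions(code);
}

// ARM64 override: flush VIXL's literal pool, then defer to the base copy.
void CodeGeneratorARM64::Finalize(CodeAllocator* allocator) {
  GetVIXLAssembler()->FinalizeCode();
  CodeGenerator::Finalize(allocator);
}

Both CompileBaseline() and CompileOptimized() now end with GenerateSlowPaths() followed by Finalize(allocator), giving any back end that needs extra finalization work a single hook.
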
diff --git a/compiler/trampolines/trampoline_compiler.cc b/compiler/trampolines/trampoline_compiler.cc
index 733b58f..cb07ffa 100644
--- a/compiler/trampolines/trampoline_compiler.cc
+++ b/compiler/trampolines/trampoline_compiler.cc
@@ -83,6 +83,7 @@ static const std::vector<uint8_t>* CreateTrampoline(EntryPointCallingConvention
break;
}
+ assembler->EmitSlowPaths();
size_t cs = assembler->CodeSize();
std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index 390f2ea..21014c8 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -329,12 +329,12 @@ void Arm64Assembler::Move(ManagedRegister m_dst, ManagedRegister m_src, size_t s
if (dst.IsXRegister()) {
if (size == 4) {
CHECK(src.IsWRegister());
- ___ Mov(reg_x(dst.AsXRegister()), reg_w(src.AsWRegister()));
+ ___ Mov(reg_w(dst.AsOverlappingWRegister()), reg_w(src.AsWRegister()));
} else {
if (src.IsXRegister()) {
___ Mov(reg_x(dst.AsXRegister()), reg_x(src.AsXRegister()));
} else {
- ___ Mov(reg_x(dst.AsXRegister()), reg_w(src.AsWRegister()));
+ ___ Mov(reg_x(dst.AsXRegister()), reg_x(src.AsOverlappingXRegister()));
}
}
} else if (dst.IsWRegister()) {
@@ -484,9 +484,9 @@ void Arm64Assembler::SignExtend(ManagedRegister mreg, size_t size) {
CHECK(size == 1 || size == 2) << size;
CHECK(reg.IsWRegister()) << reg;
if (size == 1) {
- ___ sxtb(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
+ ___ Sxtb(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
} else {
- ___ sxth(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
+ ___ Sxth(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
}
}
@@ -495,9 +495,9 @@ void Arm64Assembler::ZeroExtend(ManagedRegister mreg, size_t size) {
CHECK(size == 1 || size == 2) << size;
CHECK(reg.IsWRegister()) << reg;
if (size == 1) {
- ___ uxtb(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
+ ___ Uxtb(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
} else {
- ___ uxth(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
+ ___ Uxth(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
}
}
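
Note: assembler_arm64.cc picks up two fixes. In Move(), a 4-byte move into an X-register destination, and an 8-byte move from a W-register source, each paired a 64-bit operand with a 32-bit one; the AsOverlapping*Register() accessors select the other-width view of the same physical register so both operands of each Mov agree. In SignExtend()/ZeroExtend(), the lower-case raw-assembler mnemonics are replaced by the capitalized forms, which are the VIXL MacroAssembler's entry points (the ___ macro in this file dispatches to the macro assembler). A sketch of the width fix, using only names from the patch:

// size == 4: write through the W view that overlaps dst's X register.
___ Mov(reg_w(dst.AsOverlappingWRegister()), reg_w(src.AsWRegister()));
// size == 8 with a W-register source: read through the X view overlapping src.
___ Mov(reg_x(dst.AsXRegister()), reg_x(src.AsOverlappingXRegister()));
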
diff --git a/disassembler/Android.mk b/disassembler/Android.mk
index f2dd1ee..3ad2941 100644
--- a/disassembler/Android.mk
+++ b/disassembler/Android.mk
@@ -85,7 +85,11 @@ define build-libart-disassembler
LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk
include external/libcxx/libcxx.mk
# For disassembler_arm64.
- LOCAL_SHARED_LIBRARIES += libvixl
+ ifeq ($$(art_ndebug_or_debug),debug)
+ LOCAL_SHARED_LIBRARIES += libvixld
+ else
+ LOCAL_SHARED_LIBRARIES += libvixl
+ endif
ifeq ($$(art_target_or_host),target)
include $(BUILD_SHARED_LIBRARY)
else # host
diff --git a/disassembler/disassembler_arm64.cc b/disassembler/disassembler_arm64.cc
index fe50421..bd3bebf 100644
--- a/disassembler/disassembler_arm64.cc
+++ b/disassembler/disassembler_arm64.cc
@@ -74,7 +74,7 @@ void CustomDisassembler::VisitLoadLiteral(const vixl::Instruction* instr) {
++buffer;
}
- void* data_address = instr->LiteralAddress();
+ void* data_address = instr->LiteralAddress<void*>();
ptrdiff_t buf_size_remaining = buffer_end - buffer;
vixl::Instr op = instr->Mask(vixl::LoadLiteralMask);
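
Note: the disassembler change adapts to VIXL's templated accessor, where LiteralAddress takes the returned pointer type as a template argument. A hedged sketch of how the returned address is then consumed (the int32_t reinterpretation is illustrative, not part of the patch):

void* data_address = instr->LiteralAddress<void*>();   // as in the patch
// The disassembler reads the pooled data at the width implied by the
// load-literal opcode, e.g. for a 32-bit LDR (literal):
int32_t value = *reinterpret_cast<const int32_t*>(data_address);
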