author     Nicolas Geoffray <ngeoffray@google.com>  2014-12-01 12:28:51 +0000
committer  Gerrit Code Review <noreply-gerritcodereview@google.com>  2014-12-01 12:28:51 +0000
commit     89b53873b29e9e93fa6ba49c9685b84c60c76a4c (patch)
tree       f5c860c8f84d3ae77972e94eddfdefadd5d58dc1 /compiler
parent     672db0289de1dec9513da14153f315fecb78649e (diff)
parent     32f5b4d2c8c9b52e9522941c159577b21752d0fa (diff)
Merge "Vixl: Update the VIXL interface to VIXL 1.7 and enable VIXL debug."
Diffstat (limited to 'compiler')
-rw-r--r--  compiler/Android.mk                          |  6
-rw-r--r--  compiler/oat_writer.cc                       |  2
-rw-r--r--  compiler/optimizing/code_generator.cc        | 10
-rw-r--r--  compiler/optimizing/code_generator.h         |  1
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc  |  6
-rw-r--r--  compiler/optimizing/code_generator_arm64.h   |  2
-rw-r--r--  compiler/trampolines/trampoline_compiler.cc  |  1
-rw-r--r--  compiler/utils/arm64/assembler_arm64.cc      | 12
8 files changed, 28 insertions, 12 deletions
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 84176a1..70c7e52 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -279,7 +279,11 @@ $$(ENUM_OPERATOR_OUT_GEN): $$(GENERATED_SRC_DIR)/%_operator_out.cc : $(LOCAL_PAT
LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_build.mk
LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk
# Vixl assembly support for ARM64 targets.
- LOCAL_SHARED_LIBRARIES += libvixl
+ ifeq ($$(art_ndebug_or_debug),debug)
+ LOCAL_SHARED_LIBRARIES += libvixld
+ else
+ LOCAL_SHARED_LIBRARIES += libvixl
+ endif
ifeq ($$(art_target_or_host),target)
# For atrace.
LOCAL_SHARED_LIBRARIES += libcutils
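
The Android.mk change selects the debug flavour of VIXL (libvixld) whenever the ART compiler itself is built in debug mode, so the extra checks that VIXL compiles in under VIXL_DEBUG are actually active during development. As a hypothetical illustration of why the two library flavours exist (a sketch, not the VIXL sources):

    // Hypothetical sketch: a debug build of an assembler library keeps its
    // internal assertions, a release build compiles them away entirely.
    #include <cassert>
    #ifdef VIXL_DEBUG
    #define VIXL_ASSERT(cond) assert(cond)   // libvixld: misuse aborts loudly
    #else
    #define VIXL_ASSERT(cond) ((void)0)      // libvixl: zero runtime cost
    #endif
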
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index a57f892..8a7abb4 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -366,6 +366,8 @@ class OatWriter::Arm64RelativeCallPatcher FINAL : public ArmBaseRelativeCallPatc
Offset offset(mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
kArm64PointerSize).Int32Value());
assembler.JumpTo(ManagedRegister(arm64::X0), offset, ManagedRegister(arm64::IP0));
+ // Ensure we emit the literal pool.
+ assembler.EmitSlowPaths();
std::vector<uint8_t> thunk_code(assembler.CodeSize());
MemoryRegion code(thunk_code.data(), thunk_code.size());
assembler.FinalizeInstructions(code);
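
The new EmitSlowPaths() call is what forces the ARM64 assembler to flush its pending literal pool; without it, CodeSize() on the next line would be measured before the pool is written and the copied thunk could be missing its tail. A condensed sketch of the required ordering, reusing the calls from the hunk above (setup elided):

    // Ordering sketch (setup elided): flush pending literals before sizing.
    assembler.EmitSlowPaths();                              // 1. emit literal pool
    std::vector<uint8_t> thunk_code(assembler.CodeSize());  // 2. size is now final
    MemoryRegion code(thunk_code.data(), thunk_code.size());
    assembler.FinalizeInstructions(code);                   // 3. copy the code out
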
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 6cacd4f..e581af2 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -71,11 +71,7 @@ void CodeGenerator::CompileBaseline(CodeAllocator* allocator, bool is_leaf) {
}
}
GenerateSlowPaths();
-
- size_t code_size = GetAssembler()->CodeSize();
- uint8_t* buffer = allocator->Allocate(code_size);
- MemoryRegion code(buffer, code_size);
- GetAssembler()->FinalizeInstructions(code);
+ Finalize(allocator);
}
void CodeGenerator::CompileOptimized(CodeAllocator* allocator) {
@@ -97,9 +93,13 @@ void CodeGenerator::CompileOptimized(CodeAllocator* allocator) {
}
}
GenerateSlowPaths();
+ Finalize(allocator);
+}
+void CodeGenerator::Finalize(CodeAllocator* allocator) {
size_t code_size = GetAssembler()->CodeSize();
uint8_t* buffer = allocator->Allocate(code_size);
+
MemoryRegion code(buffer, code_size);
GetAssembler()->FinalizeInstructions(code);
}
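
CompileBaseline() and CompileOptimized() now funnel through a single virtual Finalize(), which gives a back end one well-defined point to run just before the buffer is measured and copied out. A minimal sketch of that hook pattern, using simplified stand-in types rather than the real CodeGenerator/CodeAllocator interfaces:

    // Minimal sketch of the virtual-finalize hook; simplified stand-in types.
    #include <cstddef>
    #include <cstdint>

    struct CodeAllocator {
      virtual ~CodeAllocator() {}
      virtual uint8_t* Allocate(size_t size) = 0;
    };

    struct CodeGenerator {
      virtual ~CodeGenerator() {}
      // Default: measure the assembler buffer, allocate, copy the code out.
      virtual void Finalize(CodeAllocator* allocator) { (void)allocator; /* shared copy-out */ }
    };

    struct CodeGeneratorARM64 : CodeGenerator {
      void Finalize(CodeAllocator* allocator) override {
        // Back-end specific step first (e.g. ask VIXL to emit its literal
        // pool), then the shared copy-out.
        CodeGenerator::Finalize(allocator);
      }
    };

The ARM64 override in code_generator_arm64.cc below follows exactly this shape: FinalizeCode() on the VIXL macro-assembler first, then the base-class copy-out.
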
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 321a31f..4c0d3ea 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -90,6 +90,7 @@ class CodeGenerator : public ArenaObject<kArenaAllocMisc> {
}
virtual void Initialize() = 0;
+ virtual void Finalize(CodeAllocator* allocator);
virtual void GenerateFrameEntry() = 0;
virtual void GenerateFrameExit() = 0;
virtual void Bind(HBasicBlock* block) = 0;
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 0fc4307..8d43a5d 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -479,6 +479,12 @@ CodeGeneratorARM64::CodeGeneratorARM64(HGraph* graph)
#undef __
#define __ GetVIXLAssembler()->
+void CodeGeneratorARM64::Finalize(CodeAllocator* allocator) {
+ // Ensure we emit the literal pool.
+ __ FinalizeCode();
+ CodeGenerator::Finalize(allocator);
+}
+
void CodeGeneratorARM64::GenerateFrameEntry() {
// TODO: Add proper support for the stack overflow check.
UseScratchRegisterScope temps(GetVIXLAssembler());
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index a40f27f..236a04d 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -232,6 +232,8 @@ class CodeGeneratorARM64 : public CodeGenerator {
}
}
+ void Finalize(CodeAllocator* allocator) OVERRIDE;
+
// Code generation helpers.
void MoveConstant(vixl::CPURegister destination, HConstant* constant);
void MoveHelper(Location destination, Location source, Primitive::Type type);
diff --git a/compiler/trampolines/trampoline_compiler.cc b/compiler/trampolines/trampoline_compiler.cc
index 733b58f..cb07ffa 100644
--- a/compiler/trampolines/trampoline_compiler.cc
+++ b/compiler/trampolines/trampoline_compiler.cc
@@ -83,6 +83,7 @@ static const std::vector<uint8_t>* CreateTrampoline(EntryPointCallingConvention
break;
}
+ assembler->EmitSlowPaths();
size_t cs = assembler->CodeSize();
std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index 390f2ea..21014c8 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -329,12 +329,12 @@ void Arm64Assembler::Move(ManagedRegister m_dst, ManagedRegister m_src, size_t s
if (dst.IsXRegister()) {
if (size == 4) {
CHECK(src.IsWRegister());
- ___ Mov(reg_x(dst.AsXRegister()), reg_w(src.AsWRegister()));
+ ___ Mov(reg_w(dst.AsOverlappingWRegister()), reg_w(src.AsWRegister()));
} else {
if (src.IsXRegister()) {
___ Mov(reg_x(dst.AsXRegister()), reg_x(src.AsXRegister()));
} else {
- ___ Mov(reg_x(dst.AsXRegister()), reg_w(src.AsWRegister()));
+ ___ Mov(reg_x(dst.AsXRegister()), reg_x(src.AsOverlappingXRegister()));
}
}
} else if (dst.IsWRegister()) {
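
The two Mov fixes above are about operand widths rather than behaviour: on AArch64, wN is simply the low 32 bits of xN, and the stricter checks in VIXL 1.7 reject a plain Mov whose operands have different widths, so the mismatched operand is rewritten as the overlapping view of the same physical register. A hypothetical stand-alone illustration of that aliasing (not the ART ManagedRegister API):

    // Hypothetical sketch: x<i> and w<i> are two views of one physical
    // register, so switching views only reuses the register number.
    struct XRegister { int code; };
    struct WRegister { int code; };

    inline WRegister OverlappingW(XRegister x) { return WRegister{x.code}; }  // low 32 bits of x<i>
    inline XRegister OverlappingX(WRegister w) { return XRegister{w.code}; }  // full 64-bit view of w<i>
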
@@ -484,9 +484,9 @@ void Arm64Assembler::SignExtend(ManagedRegister mreg, size_t size) {
CHECK(size == 1 || size == 2) << size;
CHECK(reg.IsWRegister()) << reg;
if (size == 1) {
- ___ sxtb(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
+ ___ Sxtb(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
} else {
- ___ sxth(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
+ ___ Sxth(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
}
}
@@ -495,9 +495,9 @@ void Arm64Assembler::ZeroExtend(ManagedRegister mreg, size_t size) {
CHECK(size == 1 || size == 2) << size;
CHECK(reg.IsWRegister()) << reg;
if (size == 1) {
- ___ uxtb(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
+ ___ Uxtb(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
} else {
- ___ uxth(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
+ ___ Uxth(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
}
}
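
The remaining hunks only change capitalization, but the spelling matters: in VIXL, the lowercase names (sxtb, uxth, ...) are the raw single-instruction encoders on the Assembler, while the capitalized names (Sxtb, Uxth, ...) are the MacroAssembler's macro-instructions, which is the interface the ___ shorthand in this file targets and the one the debug build checks. A hypothetical sketch of that layering (simplified, not the VIXL headers):

    // Hypothetical sketch of the naming convention used by VIXL-style
    // assemblers; simplified, not the real class hierarchy.
    struct WReg { int code; };

    struct Assembler {
      void sxtb(WReg rd, WReg rn) { (void)rd; (void)rn; /* encode exactly one SXTB */ }
    };

    struct MacroAssembler : Assembler {
      void Sxtb(WReg rd, WReg rn) {
        // The macro form may run extra checks or expand to several
        // instructions before delegating to the raw encoder.
        sxtb(rd, rn);
      }
    };
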