author     Serban Constantinescu <serban.constantinescu@arm.com>   2014-02-11 14:15:10 +0000
committer  Ian Rogers <irogers@google.com>                         2014-03-05 12:14:43 -0800
commit     ed8dd492e43cbaaa435c4892447072c84dbaf2dc (patch)
tree       e93e3d1e7dd6770ec8e2a3ed7105a7305eb2bc4d /compiler/utils/arm64
parent     511472b9110d15cf30a205bb514eb98f6afce651 (diff)
AArch64: Add ARM64 Assembler
This patch adds the ARM64 Assembler and ManagedRegister backend.
The implementation of the Arm64Assembler class is based on VIXL (a
programmatic A64 assembler; see external/vixl).
Change-Id: I842fd574637a953c19631eedf26f6c70d9ed7f9e
Signed-off-by: Serban Constantinescu <serban.constantinescu@arm.com>
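
The assembler below never encodes instructions itself; every emitter forwards to a vixl::MacroAssembler through the '___' macro defined at the top of assembler_arm64.cc. As a reading aid, here is a minimal self-contained sketch of that delegation pattern — the MacroAssembler in this sketch is a printing stand-in, not the real VIXL class, and Arm64AssemblerSketch only mirrors the unconditional path of AddConstant():

// Sketch only: illustrates the '___' delegation pattern used by
// Arm64Assembler in this patch. Not the real VIXL API.
#include <cstdio>
#include <memory>

namespace sketch {

// Printing stand-in for vixl::MacroAssembler - records instructions
// instead of encoding them into a buffer.
class MacroAssembler {
 public:
  void Add(int rd, int rn, int imm) {
    std::printf("add x%d, x%d, #%d\n", rd, rn, imm);
  }
};

// Same trick as the patch: '___' expands to 'vixl_masm_->', so emitter
// bodies read like an assembly listing.
#define ___ vixl_masm_->

class Arm64AssemblerSketch {
 public:
  Arm64AssemblerSketch() : vixl_masm_(new MacroAssembler()) {}

  // Mirrors the unconditional (AL) case of Arm64Assembler::AddConstant().
  void AddConstant(int rd, int rn, int value) {
    ___ Add(rd, rn, value);
  }

 private:
  std::unique_ptr<MacroAssembler> vixl_masm_;
};

#undef ___

}  // namespace sketch

int main() {
  sketch::Arm64AssemblerSketch assembler;
  assembler.AddConstant(0, 1, 16);  // emits: add x0, x1, #16
  return 0;
}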
Diffstat (limited to 'compiler/utils/arm64')
-rw-r--r-- | compiler/utils/arm64/assembler_arm64.cc              | 616
-rw-r--r-- | compiler/utils/arm64/assembler_arm64.h               | 286
-rw-r--r-- | compiler/utils/arm64/constants_arm64.h               |  37
-rw-r--r-- | compiler/utils/arm64/managed_register_arm64.cc       | 116
-rw-r--r-- | compiler/utils/arm64/managed_register_arm64.h        | 224
-rw-r--r-- | compiler/utils/arm64/managed_register_arm64_test.cc  | 611
6 files changed, 1890 insertions, 0 deletions
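
The managed_register files in this diffstat pack four register banks (X, W, D, S) into a single flat id space and define overlap between the 32-bit and 64-bit views of the same register (W[n] is the low half of X[n], S[n] the low half of D[n]). A minimal model of the Overlaps() rule they implement — Bank and Reg here are illustrative stand-ins, not the real Arm64ManagedRegister API:

// Sketch only: models Arm64ManagedRegister::Overlaps() from the diff below.
#include <cassert>

enum Bank { kXBank, kWBank, kDBank, kSBank };

struct Reg {
  Bank bank;
  int no;  // register number within its bank, e.g. 0 for X0/W0/D0/S0
};

bool IsGP(Reg r) { return r.bank == kXBank || r.bank == kWBank; }
bool IsFP(Reg r) { return r.bank == kDBank || r.bank == kSBank; }

// Registers overlap iff they live in the same bank family (GP or FP)
// and share a register number.
bool Overlaps(Reg a, Reg b) {
  return ((IsGP(a) && IsGP(b)) || (IsFP(a) && IsFP(b))) && a.no == b.no;
}

int main() {
  Reg x0{kXBank, 0}, w0{kWBank, 0}, d0{kDBank, 0};
  Reg d20{kDBank, 20}, s20{kSBank, 20};
  assert(Overlaps(x0, w0));    // W0 is the low half of X0
  assert(Overlaps(d20, s20));  // S20 overlaps D20 in this model
  assert(!Overlaps(x0, d0));   // GP and FP banks are disjoint
  return 0;
}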
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc new file mode 100644 index 0000000..b364ba0 --- /dev/null +++ b/compiler/utils/arm64/assembler_arm64.cc @@ -0,0 +1,616 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "assembler_arm64.h" +#include "base/logging.h" +#include "entrypoints/quick/quick_entrypoints.h" +#include "offsets.h" +#include "thread.h" +#include "utils.h" + +namespace art { +namespace arm64 { + +#ifdef ___ +#error "ARM64 Assembler macro already defined." +#else +#define ___ vixl_masm_-> +#endif + +void Arm64Assembler::EmitSlowPaths() { + if (!exception_blocks_.empty()) { + for (size_t i = 0; i < exception_blocks_.size(); i++) { + EmitExceptionPoll(exception_blocks_.at(i)); + } + } + ___ FinalizeCode(); +} + +size_t Arm64Assembler::CodeSize() const { + return ___ SizeOfCodeGenerated(); +} + +void Arm64Assembler::FinalizeInstructions(const MemoryRegion& region) { + // Copy the instructions from the buffer. + MemoryRegion from(reinterpret_cast<void*>(vixl_buf_), CodeSize()); + region.CopyFrom(0, from); +} + +void Arm64Assembler::GetCurrentThread(ManagedRegister tr) { + ___ Mov(reg_x(tr.AsArm64().AsCoreRegister()), reg_x(TR)); +} + +void Arm64Assembler::GetCurrentThread(FrameOffset offset, ManagedRegister /* scratch */) { + StoreToOffset(TR, SP, offset.Int32Value()); +} + +// See Arm64 PCS Section 5.2.2.1. +void Arm64Assembler::IncreaseFrameSize(size_t adjust) { + CHECK_ALIGNED(adjust, kStackAlignment); + AddConstant(SP, -adjust); +} + +// See Arm64 PCS Section 5.2.2.1. +void Arm64Assembler::DecreaseFrameSize(size_t adjust) { + CHECK_ALIGNED(adjust, kStackAlignment); + AddConstant(SP, adjust); +} + +void Arm64Assembler::AddConstant(Register rd, int32_t value, Condition cond) { + AddConstant(rd, rd, value, cond); +} + +void Arm64Assembler::AddConstant(Register rd, Register rn, int32_t value, + Condition cond) { + if ((cond == AL) || (cond == NV)) { + // VIXL macro-assembler handles all variants. + ___ Add(reg_x(rd), reg_x(rn), value); + } else { + // ip1 = rd + value + // rd = cond ? 
ip1 : rn + CHECK_NE(rn, IP1); + ___ Add(reg_x(IP1), reg_x(rn), value); + ___ Csel(reg_x(rd), reg_x(IP1), reg_x(rd), COND_OP(cond)); + } +} + +void Arm64Assembler::StoreWToOffset(StoreOperandType type, WRegister source, + Register base, int32_t offset) { + switch (type) { + case kStoreByte: + ___ Strb(reg_w(source), MEM_OP(reg_x(base), offset)); + break; + case kStoreHalfword: + ___ Strh(reg_w(source), MEM_OP(reg_x(base), offset)); + break; + case kStoreWord: + ___ Str(reg_w(source), MEM_OP(reg_x(base), offset)); + break; + default: + LOG(FATAL) << "UNREACHABLE"; + } +} + +void Arm64Assembler::StoreToOffset(Register source, Register base, int32_t offset) { + CHECK_NE(source, SP); + ___ Str(reg_x(source), MEM_OP(reg_x(base), offset)); +} + +void Arm64Assembler::StoreSToOffset(SRegister source, Register base, int32_t offset) { + ___ Str(reg_s(source), MEM_OP(reg_x(base), offset)); +} + +void Arm64Assembler::StoreDToOffset(DRegister source, Register base, int32_t offset) { + ___ Str(reg_d(source), MEM_OP(reg_x(base), offset)); +} + +void Arm64Assembler::Store(FrameOffset offs, ManagedRegister m_src, size_t size) { + Arm64ManagedRegister src = m_src.AsArm64(); + if (src.IsNoRegister()) { + CHECK_EQ(0u, size); + } else if (src.IsWRegister()) { + CHECK_EQ(4u, size); + StoreWToOffset(kStoreWord, src.AsWRegister(), SP, offs.Int32Value()); + } else if (src.IsCoreRegister()) { + CHECK_EQ(8u, size); + StoreToOffset(src.AsCoreRegister(), SP, offs.Int32Value()); + } else if (src.IsSRegister()) { + StoreSToOffset(src.AsSRegister(), SP, offs.Int32Value()); + } else { + CHECK(src.IsDRegister()) << src; + StoreDToOffset(src.AsDRegister(), SP, offs.Int32Value()); + } +} + +void Arm64Assembler::StoreRef(FrameOffset offs, ManagedRegister m_src) { + Arm64ManagedRegister src = m_src.AsArm64(); + CHECK(src.IsCoreRegister()) << src; + StoreToOffset(src.AsCoreRegister(), SP, offs.Int32Value()); +} + +void Arm64Assembler::StoreRawPtr(FrameOffset offs, ManagedRegister m_src) { + Arm64ManagedRegister src = m_src.AsArm64(); + CHECK(src.IsCoreRegister()) << src; + StoreToOffset(src.AsCoreRegister(), SP, offs.Int32Value()); +} + +void Arm64Assembler::StoreImmediateToFrame(FrameOffset offs, uint32_t imm, + ManagedRegister m_scratch) { + Arm64ManagedRegister scratch = m_scratch.AsArm64(); + CHECK(scratch.IsCoreRegister()) << scratch; + LoadImmediate(scratch.AsCoreRegister(), imm); + StoreToOffset(scratch.AsCoreRegister(), SP, offs.Int32Value()); +} + +void Arm64Assembler::StoreImmediateToThread(ThreadOffset offs, uint32_t imm, + ManagedRegister m_scratch) { + Arm64ManagedRegister scratch = m_scratch.AsArm64(); + CHECK(scratch.IsCoreRegister()) << scratch; + LoadImmediate(scratch.AsCoreRegister(), imm); + StoreToOffset(scratch.AsCoreRegister(), TR, offs.Int32Value()); +} + +void Arm64Assembler::StoreStackOffsetToThread(ThreadOffset tr_offs, + FrameOffset fr_offs, + ManagedRegister m_scratch) { + Arm64ManagedRegister scratch = m_scratch.AsArm64(); + CHECK(scratch.IsCoreRegister()) << scratch; + AddConstant(scratch.AsCoreRegister(), SP, fr_offs.Int32Value()); + StoreToOffset(scratch.AsCoreRegister(), TR, tr_offs.Int32Value()); +} + +void Arm64Assembler::StoreStackPointerToThread(ThreadOffset tr_offs) { + // Arm64 does not support: "str sp, [dest]" therefore we use IP1 as a temp reg. 
+ ___ Mov(reg_x(IP1), reg_x(SP)); + StoreToOffset(IP1, TR, tr_offs.Int32Value()); +} + +void Arm64Assembler::StoreSpanning(FrameOffset dest_off, ManagedRegister m_source, + FrameOffset in_off, ManagedRegister m_scratch) { + Arm64ManagedRegister source = m_source.AsArm64(); + Arm64ManagedRegister scratch = m_scratch.AsArm64(); + StoreToOffset(source.AsCoreRegister(), SP, dest_off.Int32Value()); + LoadFromOffset(scratch.AsCoreRegister(), SP, in_off.Int32Value()); + StoreToOffset(scratch.AsCoreRegister(), SP, dest_off.Int32Value() + 8); +} + +// Load routines. +void Arm64Assembler::LoadImmediate(Register dest, int32_t value, + Condition cond) { + if ((cond == AL) || (cond == NV)) { + ___ Mov(reg_x(dest), value); + } else { + // ip1 = value + // rd = cond ? ip1 : rd + if (value != 0) { + CHECK_NE(dest, IP1); + ___ Mov(reg_x(IP1), value); + ___ Csel(reg_x(dest), reg_x(IP1), reg_x(dest), COND_OP(cond)); + } else { + ___ Csel(reg_x(dest), reg_x(XZR), reg_x(dest), COND_OP(cond)); + } + } +} + +void Arm64Assembler::LoadWFromOffset(LoadOperandType type, WRegister dest, + Register base, int32_t offset) { + switch (type) { + case kLoadSignedByte: + ___ Ldrsb(reg_w(dest), MEM_OP(reg_x(base), offset)); + break; + case kLoadSignedHalfword: + ___ Ldrsh(reg_w(dest), MEM_OP(reg_x(base), offset)); + break; + case kLoadUnsignedByte: + ___ Ldrb(reg_w(dest), MEM_OP(reg_x(base), offset)); + break; + case kLoadUnsignedHalfword: + ___ Ldrh(reg_w(dest), MEM_OP(reg_x(base), offset)); + break; + case kLoadWord: + ___ Ldr(reg_w(dest), MEM_OP(reg_x(base), offset)); + break; + default: + LOG(FATAL) << "UNREACHABLE"; + } +} + +// Note: We can extend this member by adding load type info - see +// sign extended A64 load variants. +void Arm64Assembler::LoadFromOffset(Register dest, Register base, + int32_t offset) { + CHECK_NE(dest, SP); + ___ Ldr(reg_x(dest), MEM_OP(reg_x(base), offset)); +} + +void Arm64Assembler::LoadSFromOffset(SRegister dest, Register base, + int32_t offset) { + ___ Ldr(reg_s(dest), MEM_OP(reg_x(base), offset)); +} + +void Arm64Assembler::LoadDFromOffset(DRegister dest, Register base, + int32_t offset) { + ___ Ldr(reg_d(dest), MEM_OP(reg_x(base), offset)); +} + +void Arm64Assembler::Load(Arm64ManagedRegister dest, Register base, + int32_t offset, size_t size) { + if (dest.IsNoRegister()) { + CHECK_EQ(0u, size) << dest; + } else if (dest.IsWRegister()) { + CHECK_EQ(4u, size) << dest; + ___ Ldr(reg_w(dest.AsWRegister()), MEM_OP(reg_x(base), offset)); + } else if (dest.IsCoreRegister()) { + CHECK_EQ(8u, size) << dest; + CHECK_NE(dest.AsCoreRegister(), SP) << dest; + ___ Ldr(reg_x(dest.AsCoreRegister()), MEM_OP(reg_x(base), offset)); + } else if (dest.IsSRegister()) { + ___ Ldr(reg_s(dest.AsSRegister()), MEM_OP(reg_x(base), offset)); + } else { + CHECK(dest.IsDRegister()) << dest; + ___ Ldr(reg_d(dest.AsDRegister()), MEM_OP(reg_x(base), offset)); + } +} + +void Arm64Assembler::Load(ManagedRegister m_dst, FrameOffset src, size_t size) { + return Load(m_dst.AsArm64(), SP, src.Int32Value(), size); +} + +void Arm64Assembler::Load(ManagedRegister m_dst, ThreadOffset src, size_t size) { + return Load(m_dst.AsArm64(), TR, src.Int32Value(), size); +} + +void Arm64Assembler::LoadRef(ManagedRegister m_dst, FrameOffset offs) { + Arm64ManagedRegister dst = m_dst.AsArm64(); + CHECK(dst.IsCoreRegister()) << dst; + LoadFromOffset(dst.AsCoreRegister(), SP, offs.Int32Value()); +} + +void Arm64Assembler::LoadRef(ManagedRegister m_dst, ManagedRegister m_base, + MemberOffset offs) { + Arm64ManagedRegister dst = 
m_dst.AsArm64(); + Arm64ManagedRegister base = m_base.AsArm64(); + CHECK(dst.IsCoreRegister() && base.IsCoreRegister()); + LoadFromOffset(dst.AsCoreRegister(), base.AsCoreRegister(), offs.Int32Value()); +} + +void Arm64Assembler::LoadRawPtr(ManagedRegister m_dst, ManagedRegister m_base, Offset offs) { + Arm64ManagedRegister dst = m_dst.AsArm64(); + Arm64ManagedRegister base = m_base.AsArm64(); + CHECK(dst.IsCoreRegister() && base.IsCoreRegister()); + LoadFromOffset(dst.AsCoreRegister(), base.AsCoreRegister(), offs.Int32Value()); +} + +void Arm64Assembler::LoadRawPtrFromThread(ManagedRegister m_dst, ThreadOffset offs) { + Arm64ManagedRegister dst = m_dst.AsArm64(); + CHECK(dst.IsCoreRegister()) << dst; + LoadFromOffset(dst.AsCoreRegister(), TR, offs.Int32Value()); +} + +// Copying routines. +void Arm64Assembler::Move(ManagedRegister m_dst, ManagedRegister m_src, size_t size) { + Arm64ManagedRegister dst = m_dst.AsArm64(); + Arm64ManagedRegister src = m_src.AsArm64(); + if (!dst.Equals(src)) { + if (dst.IsCoreRegister()) { + CHECK(src.IsCoreRegister()) << src; + ___ Mov(reg_x(dst.AsCoreRegister()), reg_x(src.AsCoreRegister())); + } else if (dst.IsWRegister()) { + CHECK(src.IsWRegister()) << src; + ___ Mov(reg_w(dst.AsWRegister()), reg_w(src.AsWRegister())); + } else if (dst.IsSRegister()) { + CHECK(src.IsSRegister()) << src; + ___ Fmov(reg_s(dst.AsSRegister()), reg_s(src.AsSRegister())); + } else { + CHECK(dst.IsDRegister()) << dst; + CHECK(src.IsDRegister()) << src; + ___ Fmov(reg_d(dst.AsDRegister()), reg_d(src.AsDRegister())); + } + } +} + +void Arm64Assembler::CopyRawPtrFromThread(FrameOffset fr_offs, + ThreadOffset tr_offs, + ManagedRegister m_scratch) { + Arm64ManagedRegister scratch = m_scratch.AsArm64(); + CHECK(scratch.IsCoreRegister()) << scratch; + LoadFromOffset(scratch.AsCoreRegister(), TR, tr_offs.Int32Value()); + StoreToOffset(scratch.AsCoreRegister(), SP, fr_offs.Int32Value()); +} + +void Arm64Assembler::CopyRawPtrToThread(ThreadOffset tr_offs, + FrameOffset fr_offs, + ManagedRegister m_scratch) { + Arm64ManagedRegister scratch = m_scratch.AsArm64(); + CHECK(scratch.IsCoreRegister()) << scratch; + LoadFromOffset(scratch.AsCoreRegister(), SP, fr_offs.Int32Value()); + StoreToOffset(scratch.AsCoreRegister(), TR, tr_offs.Int32Value()); +} + +void Arm64Assembler::CopyRef(FrameOffset dest, FrameOffset src, + ManagedRegister m_scratch) { + Arm64ManagedRegister scratch = m_scratch.AsArm64(); + CHECK(scratch.IsCoreRegister()) << scratch; + LoadFromOffset(scratch.AsCoreRegister(), SP, src.Int32Value()); + StoreToOffset(scratch.AsCoreRegister(), SP, dest.Int32Value()); +} + +void Arm64Assembler::Copy(FrameOffset dest, FrameOffset src, + ManagedRegister m_scratch, size_t size) { + Arm64ManagedRegister scratch = m_scratch.AsArm64(); + CHECK(scratch.IsCoreRegister() || scratch.IsWRegister()) << scratch; + CHECK(size == 4 || size == 8) << size; + if (size == 4) { + LoadWFromOffset(kLoadWord, scratch.AsWRegister(), SP, src.Int32Value()); + StoreWToOffset(kStoreWord, scratch.AsWRegister(), SP, dest.Int32Value()); + } else if (size == 8) { + LoadFromOffset(scratch.AsCoreRegister(), SP, src.Int32Value()); + StoreToOffset(scratch.AsCoreRegister(), SP, dest.Int32Value()); + } else { + UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8"; + } +} + +void Arm64Assembler::Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, + ManagedRegister m_scratch, size_t size) { + Arm64ManagedRegister scratch = m_scratch.AsArm64(); + Arm64ManagedRegister base = src_base.AsArm64(); + 
CHECK(base.IsCoreRegister()) << base; + CHECK(scratch.IsCoreRegister() || scratch.IsWRegister()) << scratch; + CHECK(size == 4 || size == 8) << size; + if (size == 4) { + LoadWFromOffset(kLoadWord, scratch.AsWRegister(), base.AsCoreRegister(), + src_offset.Int32Value()); + StoreWToOffset(kStoreWord, scratch.AsWRegister(), SP, dest.Int32Value()); + } else if (size == 8) { + LoadFromOffset(scratch.AsCoreRegister(), base.AsCoreRegister(), src_offset.Int32Value()); + StoreToOffset(scratch.AsCoreRegister(), SP, dest.Int32Value()); + } else { + UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8"; + } +} + +void Arm64Assembler::Copy(ManagedRegister m_dest_base, Offset dest_offs, FrameOffset src, + ManagedRegister m_scratch, size_t size) { + Arm64ManagedRegister scratch = m_scratch.AsArm64(); + Arm64ManagedRegister base = m_dest_base.AsArm64(); + CHECK(base.IsCoreRegister()) << base; + CHECK(scratch.IsCoreRegister() || scratch.IsWRegister()) << scratch; + CHECK(size == 4 || size == 8) << size; + if (size == 4) { + LoadWFromOffset(kLoadWord, scratch.AsWRegister(), SP, src.Int32Value()); + StoreWToOffset(kStoreWord, scratch.AsWRegister(), base.AsCoreRegister(), + dest_offs.Int32Value()); + } else if (size == 8) { + LoadFromOffset(scratch.AsCoreRegister(), SP, src.Int32Value()); + StoreToOffset(scratch.AsCoreRegister(), base.AsCoreRegister(), dest_offs.Int32Value()); + } else { + UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8"; + } +} + +void Arm64Assembler::Copy(FrameOffset /*dst*/, FrameOffset /*src_base*/, Offset /*src_offset*/, + ManagedRegister /*mscratch*/, size_t /*size*/) { + UNIMPLEMENTED(FATAL) << "Unimplemented Copy() variant"; +} + +void Arm64Assembler::Copy(ManagedRegister m_dest, Offset dest_offset, + ManagedRegister m_src, Offset src_offset, + ManagedRegister m_scratch, size_t size) { + Arm64ManagedRegister scratch = m_scratch.AsArm64(); + Arm64ManagedRegister src = m_src.AsArm64(); + Arm64ManagedRegister dest = m_dest.AsArm64(); + CHECK(dest.IsCoreRegister()) << dest; + CHECK(src.IsCoreRegister()) << src; + CHECK(scratch.IsCoreRegister() || scratch.IsWRegister()) << scratch; + CHECK(size == 4 || size == 8) << size; + if (size == 4) { + LoadWFromOffset(kLoadWord, scratch.AsWRegister(), src.AsCoreRegister(), + src_offset.Int32Value()); + StoreWToOffset(kStoreWord, scratch.AsWRegister(), dest.AsCoreRegister(), + dest_offset.Int32Value()); + } else if (size == 8) { + LoadFromOffset(scratch.AsCoreRegister(), src.AsCoreRegister(), src_offset.Int32Value()); + StoreToOffset(scratch.AsCoreRegister(), dest.AsCoreRegister(), dest_offset.Int32Value()); + } else { + UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8"; + } +} + +void Arm64Assembler::Copy(FrameOffset /*dst*/, Offset /*dest_offset*/, + FrameOffset /*src*/, Offset /*src_offset*/, + ManagedRegister /*scratch*/, size_t /*size*/) { + UNIMPLEMENTED(FATAL) << "Unimplemented Copy() variant"; +} + +void Arm64Assembler::MemoryBarrier(ManagedRegister m_scratch) { + // TODO: Should we check that m_scratch is IP? - see arm. 
+#if ANDROID_SMP != 0 + ___ Dmb(vixl::InnerShareable, vixl::BarrierAll); +#endif +} + +void Arm64Assembler::SignExtend(ManagedRegister /*mreg*/, size_t /*size*/) { + UNIMPLEMENTED(FATAL) << "no sign extension necessary for Arm64"; +} + +void Arm64Assembler::ZeroExtend(ManagedRegister /*mreg*/, size_t /*size*/) { + UNIMPLEMENTED(FATAL) << "no zero extension necessary for Arm64"; +} + +void Arm64Assembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) { + // TODO: not validating references. +} + +void Arm64Assembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) { + // TODO: not validating references. +} + +void Arm64Assembler::Call(ManagedRegister m_base, Offset offs, ManagedRegister m_scratch) { + Arm64ManagedRegister base = m_base.AsArm64(); + Arm64ManagedRegister scratch = m_scratch.AsArm64(); + CHECK(base.IsCoreRegister()) << base; + CHECK(scratch.IsCoreRegister()) << scratch; + LoadFromOffset(scratch.AsCoreRegister(), base.AsCoreRegister(), offs.Int32Value()); + ___ Blr(reg_x(scratch.AsCoreRegister())); +} + +void Arm64Assembler::Call(FrameOffset base, Offset offs, ManagedRegister m_scratch) { + Arm64ManagedRegister scratch = m_scratch.AsArm64(); + CHECK(scratch.IsCoreRegister()) << scratch; + // Call *(*(SP + base) + offset) + LoadFromOffset(scratch.AsCoreRegister(), SP, base.Int32Value()); + LoadFromOffset(scratch.AsCoreRegister(), scratch.AsCoreRegister(), offs.Int32Value()); + ___ Blr(reg_x(scratch.AsCoreRegister())); +} + +void Arm64Assembler::Call(ThreadOffset /*offset*/, ManagedRegister /*scratch*/) { + UNIMPLEMENTED(FATAL) << "Unimplemented Call() variant"; +} + +void Arm64Assembler::CreateSirtEntry(ManagedRegister m_out_reg, FrameOffset sirt_offs, + ManagedRegister m_in_reg, bool null_allowed) { + Arm64ManagedRegister out_reg = m_out_reg.AsArm64(); + Arm64ManagedRegister in_reg = m_in_reg.AsArm64(); + // For now we only hold stale sirt entries in x registers. + CHECK(in_reg.IsNoRegister() || in_reg.IsCoreRegister()) << in_reg; + CHECK(out_reg.IsCoreRegister()) << out_reg; + if (null_allowed) { + // Null values get a SIRT entry value of 0. Otherwise, the SIRT entry is + // the address in the SIRT holding the reference. + // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset) + if (in_reg.IsNoRegister()) { + LoadFromOffset(out_reg.AsCoreRegister(), SP, sirt_offs.Int32Value()); + in_reg = out_reg; + } + ___ Cmp(reg_x(in_reg.AsCoreRegister()), 0); + if (!out_reg.Equals(in_reg)) { + LoadImmediate(out_reg.AsCoreRegister(), 0, EQ); + } + AddConstant(out_reg.AsCoreRegister(), SP, sirt_offs.Int32Value(), NE); + } else { + AddConstant(out_reg.AsCoreRegister(), SP, sirt_offs.Int32Value(), AL); + } +} + +void Arm64Assembler::CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset, + ManagedRegister m_scratch, bool null_allowed) { + Arm64ManagedRegister scratch = m_scratch.AsArm64(); + CHECK(scratch.IsCoreRegister()) << scratch; + if (null_allowed) { + LoadFromOffset(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value()); + // Null values get a SIRT entry value of 0. Otherwise, the sirt entry is + // the address in the SIRT holding the reference. + // e.g. scratch = (scratch == 0) ? 0 : (SP+sirt_offset) + ___ Cmp(reg_x(scratch.AsCoreRegister()), 0); + // Move this logic in add constants with flags. 
+ AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value(), NE); + } else { + AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value(), AL); + } + StoreToOffset(scratch.AsCoreRegister(), SP, out_off.Int32Value()); +} + +void Arm64Assembler::LoadReferenceFromSirt(ManagedRegister m_out_reg, + ManagedRegister m_in_reg) { + Arm64ManagedRegister out_reg = m_out_reg.AsArm64(); + Arm64ManagedRegister in_reg = m_in_reg.AsArm64(); + CHECK(out_reg.IsCoreRegister()) << out_reg; + CHECK(in_reg.IsCoreRegister()) << in_reg; + vixl::Label exit; + if (!out_reg.Equals(in_reg)) { + // FIXME: Who sets the flags here? + LoadImmediate(out_reg.AsCoreRegister(), 0, EQ); + } + ___ Cmp(reg_x(in_reg.AsCoreRegister()), 0); + ___ B(&exit, COND_OP(EQ)); + LoadFromOffset(out_reg.AsCoreRegister(), in_reg.AsCoreRegister(), 0); + ___ Bind(&exit); +} + +void Arm64Assembler::ExceptionPoll(ManagedRegister m_scratch, size_t stack_adjust) { + CHECK_ALIGNED(stack_adjust, kStackAlignment); + Arm64ManagedRegister scratch = m_scratch.AsArm64(); + Arm64Exception *current_exception = new Arm64Exception(scratch, stack_adjust); + exception_blocks_.push_back(current_exception); + LoadFromOffset(scratch.AsCoreRegister(), TR, Thread::ExceptionOffset().Int32Value()); + ___ Cmp(reg_x(scratch.AsCoreRegister()), 0); + ___ B(current_exception->Entry(), COND_OP(NE)); +} + +void Arm64Assembler::EmitExceptionPoll(Arm64Exception *exception) { + // Bind exception poll entry. + ___ Bind(exception->Entry()); + if (exception->stack_adjust_ != 0) { // Fix up the frame. + DecreaseFrameSize(exception->stack_adjust_); + } + // Pass exception object as argument. + // Don't care about preserving X0 as this won't return. + ___ Mov(reg_x(X0), reg_x(exception->scratch_.AsCoreRegister())); + LoadFromOffset(IP1, TR, QUICK_ENTRYPOINT_OFFSET(pDeliverException).Int32Value()); + ___ Blr(reg_x(IP1)); + // Call should never return. + ___ Brk(); +} + +void Arm64Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg, + const std::vector<ManagedRegister>& callee_save_regs, + const std::vector<ManagedRegister>& entry_spills) { + CHECK_ALIGNED(frame_size, kStackAlignment); + CHECK(X0 == method_reg.AsArm64().AsCoreRegister()); + + // TODO: *create APCS FP - end of FP chain; + // *add support for saving a different set of callee regs. + // For now we check that the size of callee regs vector is 20 + // equivalent to the APCS callee saved regs [X19, x30] [D8, D15]. + CHECK_EQ(callee_save_regs.size(), kCalleeSavedRegsSize); + ___ PushCalleeSavedRegisters(); + + // Increate frame to required size - must be at least space to push Method*. + CHECK_GT(frame_size, kCalleeSavedRegsSize * kPointerSize); + size_t adjust = frame_size - (kCalleeSavedRegsSize * kPointerSize); + IncreaseFrameSize(adjust); + + // Write Method*. + StoreToOffset(X0, SP, 0); + + // Write out entry spills, treated as X regs. + // TODO: we can implement a %2 STRP variant of StoreToOffset. + for (size_t i = 0; i < entry_spills.size(); ++i) { + Register reg = entry_spills.at(i).AsArm64().AsCoreRegister(); + StoreToOffset(reg, SP, frame_size + kPointerSize + (i * kPointerSize)); + } +} + +void Arm64Assembler::RemoveFrame(size_t frame_size, const std::vector<ManagedRegister>& callee_save_regs) { + CHECK_ALIGNED(frame_size, kStackAlignment); + + // For now we only check that the size of the frame is greater than the + // no of APCS callee saved regs [X19, X30] [D8, D15]. 
+ CHECK_EQ(callee_save_regs.size(), kCalleeSavedRegsSize); + CHECK_GT(frame_size, kCalleeSavedRegsSize * kPointerSize); + + // Decrease frame size to start of callee saved regs. + size_t adjust = frame_size - (kCalleeSavedRegsSize * kPointerSize); + DecreaseFrameSize(adjust); + + // Pop callee saved and return to LR. + ___ PopCalleeSavedRegisters(); + ___ Ret(); +} + +} // namespace arm64 +} // namespace art diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h new file mode 100644 index 0000000..70df252 --- /dev/null +++ b/compiler/utils/arm64/assembler_arm64.h @@ -0,0 +1,286 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_UTILS_ARM64_ASSEMBLER_ARM64_H_ +#define ART_COMPILER_UTILS_ARM64_ASSEMBLER_ARM64_H_ + +#include <vector> + +#include "base/logging.h" +#include "constants_arm64.h" +#include "utils/arm64/managed_register_arm64.h" +#include "utils/assembler.h" +#include "offsets.h" +#include "utils.h" +#include "UniquePtr.h" +#include "a64/macro-assembler-a64.h" +#include "a64/disasm-a64.h" + +namespace art { +namespace arm64 { + +#define MEM_OP(x...) vixl::MemOperand(x) +#define COND_OP(x) static_cast<vixl::Condition>(x) + +enum Condition { + kNoCondition = -1, + EQ = 0, + NE = 1, + HS = 2, + LO = 3, + MI = 4, + PL = 5, + VS = 6, + VC = 7, + HI = 8, + LS = 9, + GE = 10, + LT = 11, + GT = 12, + LE = 13, + AL = 14, // Always. + NV = 15, // Behaves as always/al. + kMaxCondition = 16, +}; + +enum LoadOperandType { + kLoadSignedByte, + kLoadUnsignedByte, + kLoadSignedHalfword, + kLoadUnsignedHalfword, + kLoadWord, + kLoadCoreWord, + kLoadSWord, + kLoadDWord +}; + +enum StoreOperandType { + kStoreByte, + kStoreHalfword, + kStoreWord, + kStoreCoreWord, + kStoreSWord, + kStoreDWord +}; + +class Arm64Exception; + +class Arm64Assembler : public Assembler { + public: + Arm64Assembler() : vixl_buf_(new byte[BUF_SIZE]), + vixl_masm_(new vixl::MacroAssembler(vixl_buf_, BUF_SIZE)) {} + + virtual ~Arm64Assembler() { + if (kIsDebugBuild) { + vixl::Decoder *decoder = new vixl::Decoder(); + vixl::PrintDisassembler *test = new vixl::PrintDisassembler(stdout); + decoder->AppendVisitor(test); + + for (size_t i = 0; i < CodeSize() / vixl::kInstructionSize; ++i) { + vixl::Instruction *instr = + reinterpret_cast<vixl::Instruction*>(vixl_buf_ + i * vixl::kInstructionSize); + decoder->Decode(instr); + } + } + delete[] vixl_buf_; + } + + // Emit slow paths queued during assembly. + void EmitSlowPaths(); + + // Size of generated code. + size_t CodeSize() const; + + // Copy instructions out of assembly buffer into the given region of memory. + void FinalizeInstructions(const MemoryRegion& region); + + // Emit code that will create an activation on the stack. 
+ void BuildFrame(size_t frame_size, ManagedRegister method_reg, + const std::vector<ManagedRegister>& callee_save_regs, + const std::vector<ManagedRegister>& entry_spills); + + // Emit code that will remove an activation from the stack. + void RemoveFrame(size_t frame_size, + const std::vector<ManagedRegister>& callee_save_regs); + + void IncreaseFrameSize(size_t adjust); + void DecreaseFrameSize(size_t adjust); + + // Store routines. + void Store(FrameOffset offs, ManagedRegister src, size_t size); + void StoreRef(FrameOffset dest, ManagedRegister src); + void StoreRawPtr(FrameOffset dest, ManagedRegister src); + void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, + ManagedRegister scratch); + void StoreImmediateToThread(ThreadOffset dest, uint32_t imm, + ManagedRegister scratch); + void StoreStackOffsetToThread(ThreadOffset thr_offs, + FrameOffset fr_offs, + ManagedRegister scratch); + void StoreStackPointerToThread(ThreadOffset thr_offs); + void StoreSpanning(FrameOffset dest, ManagedRegister src, + FrameOffset in_off, ManagedRegister scratch); + + // Load routines. + void Load(ManagedRegister dest, FrameOffset src, size_t size); + void Load(ManagedRegister dest, ThreadOffset src, size_t size); + void LoadRef(ManagedRegister dest, FrameOffset src); + void LoadRef(ManagedRegister dest, ManagedRegister base, + MemberOffset offs); + void LoadRawPtr(ManagedRegister dest, ManagedRegister base, + Offset offs); + void LoadRawPtrFromThread(ManagedRegister dest, + ThreadOffset offs); + // Copying routines. + void Move(ManagedRegister dest, ManagedRegister src, size_t size); + void CopyRawPtrFromThread(FrameOffset fr_offs, ThreadOffset thr_offs, + ManagedRegister scratch); + void CopyRawPtrToThread(ThreadOffset thr_offs, FrameOffset fr_offs, + ManagedRegister scratch); + void CopyRef(FrameOffset dest, FrameOffset src, + ManagedRegister scratch); + void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size); + void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, + ManagedRegister scratch, size_t size); + void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src, + ManagedRegister scratch, size_t size); + void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, + ManagedRegister scratch, size_t size); + void Copy(ManagedRegister dest, Offset dest_offset, + ManagedRegister src, Offset src_offset, + ManagedRegister scratch, size_t size); + void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset, + ManagedRegister scratch, size_t size); + void MemoryBarrier(ManagedRegister scratch); + + // Sign extension. + void SignExtend(ManagedRegister mreg, size_t size); + + // Zero extension. + void ZeroExtend(ManagedRegister mreg, size_t size); + + // Exploit fast access in managed code to Thread::Current(). + void GetCurrentThread(ManagedRegister tr); + void GetCurrentThread(FrameOffset dest_offset, + ManagedRegister scratch); + + // Set up out_reg to hold a Object** into the SIRT, or to be NULL if the + // value is null and null_allowed. in_reg holds a possibly stale reference + // that can be used to avoid loading the SIRT entry to see if the value is + // NULL. + void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset, + ManagedRegister in_reg, bool null_allowed); + + // Set up out_off to hold a Object** into the SIRT, or to be NULL if the + // value is null and null_allowed. 
+ void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset, + ManagedRegister scratch, bool null_allowed); + + // src holds a SIRT entry (Object**) load this into dst. + void LoadReferenceFromSirt(ManagedRegister dst, + ManagedRegister src); + + // Heap::VerifyObject on src. In some cases (such as a reference to this) we + // know that src may not be null. + void VerifyObject(ManagedRegister src, bool could_be_null); + void VerifyObject(FrameOffset src, bool could_be_null); + + // Call to address held at [base+offset]. + void Call(ManagedRegister base, Offset offset, ManagedRegister scratch); + void Call(FrameOffset base, Offset offset, ManagedRegister scratch); + void Call(ThreadOffset offset, ManagedRegister scratch); + + // Generate code to check if Thread::Current()->exception_ is non-null + // and branch to a ExceptionSlowPath if it is. + void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust); + + private: + static vixl::Register reg_x(int code) { + CHECK(code < kNumberOfCoreRegisters) << code; + if (code == SP) { + return vixl::sp; + } + return vixl::Register::XRegFromCode(code); + } + + static vixl::Register reg_w(int code) { + return vixl::Register::WRegFromCode(code); + } + + static vixl::FPRegister reg_d(int code) { + return vixl::FPRegister::DRegFromCode(code); + } + + static vixl::FPRegister reg_s(int code) { + return vixl::FPRegister::SRegFromCode(code); + } + + // Emits Exception block. + void EmitExceptionPoll(Arm64Exception *exception); + + void StoreWToOffset(StoreOperandType type, WRegister source, + Register base, int32_t offset); + void StoreToOffset(Register source, Register base, int32_t offset); + void StoreSToOffset(SRegister source, Register base, int32_t offset); + void StoreDToOffset(DRegister source, Register base, int32_t offset); + + void LoadImmediate(Register dest, int32_t value, Condition cond = AL); + void Load(Arm64ManagedRegister dst, Register src, int32_t src_offset, size_t size); + void LoadWFromOffset(LoadOperandType type, WRegister dest, + Register base, int32_t offset); + void LoadFromOffset(Register dest, Register base, int32_t offset); + void LoadSFromOffset(SRegister dest, Register base, int32_t offset); + void LoadDFromOffset(DRegister dest, Register base, int32_t offset); + void AddConstant(Register rd, int32_t value, Condition cond = AL); + void AddConstant(Register rd, Register rn, int32_t value, Condition cond = AL); + + // Vixl buffer size. + static constexpr size_t BUF_SIZE = 4096; + + // Vixl buffer. + byte* vixl_buf_; + + // Unique ptr - vixl assembler. + UniquePtr<vixl::MacroAssembler> vixl_masm_; + + // List of exception blocks to generate at the end of the code cache. + std::vector<Arm64Exception*> exception_blocks_; +}; + +class Arm64Exception { + private: + explicit Arm64Exception(Arm64ManagedRegister scratch, size_t stack_adjust) + : scratch_(scratch), stack_adjust_(stack_adjust) { + } + + vixl::Label* Entry() { return &exception_entry_; } + + // Register used for passing Thread::Current()->exception_ . + const Arm64ManagedRegister scratch_; + + // Stack adjust for ExceptionPool. 
+ const size_t stack_adjust_; + + vixl::Label exception_entry_; + + friend class Arm64Assembler; + DISALLOW_COPY_AND_ASSIGN(Arm64Exception); +}; + +} // namespace arm64 +} // namespace art + +#endif // ART_COMPILER_UTILS_ARM64_ASSEMBLER_ARM64_H_ diff --git a/compiler/utils/arm64/constants_arm64.h b/compiler/utils/arm64/constants_arm64.h new file mode 100644 index 0000000..c05c2f1 --- /dev/null +++ b/compiler/utils/arm64/constants_arm64.h @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_UTILS_ARM64_CONSTANTS_ARM64_H_ +#define ART_COMPILER_UTILS_ARM64_CONSTANTS_ARM64_H_ + +#include <stdint.h> +#include <iosfwd> +#include "arch/arm64/registers_arm64.h" +#include "base/casts.h" +#include "base/logging.h" +#include "globals.h" + +// TODO: Extend this file by adding missing functionality. + +namespace art { +namespace arm64 { + + constexpr unsigned int kCalleeSavedRegsSize = 20; + +} // arm64 +} // art + +#endif // ART_COMPILER_UTILS_ARM64_CONSTANTS_ARM64_H_ diff --git a/compiler/utils/arm64/managed_register_arm64.cc b/compiler/utils/arm64/managed_register_arm64.cc new file mode 100644 index 0000000..cc0b509 --- /dev/null +++ b/compiler/utils/arm64/managed_register_arm64.cc @@ -0,0 +1,116 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "managed_register_arm64.h" +#include "globals.h" + +namespace art { +namespace arm64 { + +// TODO: Define convention +// +// Do not use APCS callee saved regs for now. Use: +// * [X0, X15] +// * [W0, W15] +// * [D0, D31] +// * [S0, S31] +static const int kNumberOfAvailableCoreRegisters = (X15 - X0) + 1; +static const int kNumberOfAvailableWRegisters = (W15 - W0) + 1; +static const int kNumberOfAvailableDRegisters = kNumberOfDRegisters; +static const int kNumberOfAvailableSRegisters = kNumberOfSRegisters; + +// Returns true if this managed-register overlaps the other managed-register. 
+// GP Register Bank: +// 31____0 W[n] +// 63__________0 X[n] +// +// FP Register Bank: +// 31____0 S[n] +// 63__________0 D[n] +bool Arm64ManagedRegister::Overlaps(const Arm64ManagedRegister& other) const { + if (IsNoRegister() || other.IsNoRegister()) return false; + if ((IsGPRegister() && other.IsGPRegister()) || + (IsFPRegister() && other.IsFPRegister())) { + return (RegNo() == other.RegNo()); + } + return false; +} + +int Arm64ManagedRegister::RegNo() const { + CHECK(!IsNoRegister()); + int no; + if (IsCoreRegister()) { + if (IsStackPointer()) { + no = static_cast<int>(X31); + } else { + no = static_cast<int>(AsCoreRegister()); + } + } else if (IsWRegister()) { + no = static_cast<int>(AsWRegister()); + } else if (IsDRegister()) { + no = static_cast<int>(AsDRegister()); + } else if (IsSRegister()) { + no = static_cast<int>(AsSRegister()); + } else { + no = kNoRegister; + } + return no; +} + +int Arm64ManagedRegister::RegIdLow() const { + CHECK(IsCoreRegister() || IsDRegister()); + int low = RegNo(); + if (IsCoreRegister()) { + low += kNumberOfCoreRegIds; + } else if (IsDRegister()) { + low += kNumberOfCoreRegIds + kNumberOfWRegIds + kNumberOfDRegIds; + } + return low; +} + +// FIXME: Find better naming. +int Arm64ManagedRegister::RegIdHigh() const { + CHECK(IsWRegister() || IsSRegister()); + int high = RegNo(); + if (IsSRegister()) { + high += kNumberOfCoreRegIds + kNumberOfWRegIds; + } + return high; +} + +void Arm64ManagedRegister::Print(std::ostream& os) const { + if (!IsValidManagedRegister()) { + os << "No Register"; + } else if (IsCoreRegister()) { + os << "XCore: " << static_cast<int>(AsCoreRegister()); + } else if (IsWRegister()) { + os << "WCore: " << static_cast<int>(AsWRegister()); + } else if (IsDRegister()) { + os << "DRegister: " << static_cast<int>(AsDRegister()); + } else if (IsSRegister()) { + os << "SRegister: " << static_cast<int>(AsSRegister()); + } else { + os << "??: " << RegId(); + } +} + +std::ostream& operator<<(std::ostream& os, const Arm64ManagedRegister& reg) { + reg.Print(os); + return os; +} + +} // namespace arm64 +} // namespace art diff --git a/compiler/utils/arm64/managed_register_arm64.h b/compiler/utils/arm64/managed_register_arm64.h new file mode 100644 index 0000000..5df37cc --- /dev/null +++ b/compiler/utils/arm64/managed_register_arm64.h @@ -0,0 +1,224 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_COMPILER_UTILS_ARM64_MANAGED_REGISTER_ARM64_H_ +#define ART_COMPILER_UTILS_ARM64_MANAGED_REGISTER_ARM64_H_ + +#include "base/logging.h" +#include "constants_arm64.h" +#include "utils/managed_register.h" + +namespace art { +namespace arm64 { + +const int kNumberOfCoreRegIds = kNumberOfCoreRegisters; +const int kNumberOfWRegIds = kNumberOfWRegisters; +const int kNumberOfDRegIds = kNumberOfDRegisters; +const int kNumberOfSRegIds = kNumberOfSRegisters; + +const int kNumberOfRegIds = kNumberOfCoreRegIds + kNumberOfWRegIds + + kNumberOfDRegIds + kNumberOfSRegIds; + +// Register ids map: +// [0..X[ core registers 64bit (enum Register) +// [X..W[ core registers 32bit (enum WRegister) +// [W..D[ double precision VFP registers (enum DRegister) +// [D..S[ single precision VFP registers (enum SRegister) +// +// where: +// X = kNumberOfCoreRegIds +// W = X + kNumberOfWRegIds +// D = W + kNumberOfDRegIds +// S = D + kNumberOfSRegIds +// +// An instance of class 'ManagedRegister' represents a single Arm64 +// register. A register can be one of the following: +// * core register 64bit context (enum Register) +// * core register 32bit context (enum WRegister) +// * VFP double precision register (enum DRegister) +// * VFP single precision register (enum SRegister) +// +// There is a one to one mapping between ManagedRegister and register id. + +class Arm64ManagedRegister : public ManagedRegister { + public: + Register AsCoreRegister() const { + CHECK(IsCoreRegister()); + return static_cast<Register>(id_); + } + + WRegister AsWRegister() const { + CHECK(IsWRegister()); + return static_cast<WRegister>(id_ - kNumberOfCoreRegIds); + } + + DRegister AsDRegister() const { + CHECK(IsDRegister()); + return static_cast<DRegister>(id_ - kNumberOfCoreRegIds - kNumberOfWRegIds); + } + + SRegister AsSRegister() const { + CHECK(IsSRegister()); + return static_cast<SRegister>(id_ - kNumberOfCoreRegIds - kNumberOfWRegIds - + kNumberOfDRegIds); + } + + WRegister AsOverlappingCoreRegisterLow() const { + CHECK(IsValidManagedRegister()); + if (IsStackPointer()) return W31; + return static_cast<WRegister>(AsCoreRegister()); + } + + // FIXME: Find better naming. + Register AsOverlappingWRegisterCore() const { + CHECK(IsValidManagedRegister()); + return static_cast<Register>(AsWRegister()); + } + + SRegister AsOverlappingDRegisterLow() const { + CHECK(IsValidManagedRegister()); + return static_cast<SRegister>(AsDRegister()); + } + + // FIXME: Find better naming. 
+ DRegister AsOverlappingSRegisterD() const { + CHECK(IsValidManagedRegister()); + return static_cast<DRegister>(AsSRegister()); + } + + bool IsCoreRegister() const { + CHECK(IsValidManagedRegister()); + return (0 <= id_) && (id_ < kNumberOfCoreRegIds); + } + + bool IsWRegister() const { + CHECK(IsValidManagedRegister()); + const int test = id_ - kNumberOfCoreRegIds; + return (0 <= test) && (test < kNumberOfWRegIds); + } + + bool IsDRegister() const { + CHECK(IsValidManagedRegister()); + const int test = id_ - (kNumberOfCoreRegIds + kNumberOfWRegIds); + return (0 <= test) && (test < kNumberOfDRegIds); + } + + bool IsSRegister() const { + CHECK(IsValidManagedRegister()); + const int test = id_ - (kNumberOfCoreRegIds + kNumberOfWRegIds + + kNumberOfDRegIds); + return (0 <= test) && (test < kNumberOfSRegIds); + } + + bool IsGPRegister() const { + return IsCoreRegister() || IsWRegister(); + } + + bool IsFPRegister() const { + return IsDRegister() || IsSRegister(); + } + + bool IsSameType(Arm64ManagedRegister test) const { + CHECK(IsValidManagedRegister() && test.IsValidManagedRegister()); + return + (IsCoreRegister() && test.IsCoreRegister()) || + (IsWRegister() && test.IsWRegister()) || + (IsDRegister() && test.IsDRegister()) || + (IsSRegister() && test.IsSRegister()); + } + + // Returns true if the two managed-registers ('this' and 'other') overlap. + // Either managed-register may be the NoRegister. If both are the NoRegister + // then false is returned. + bool Overlaps(const Arm64ManagedRegister& other) const; + + void Print(std::ostream& os) const; + + static Arm64ManagedRegister FromCoreRegister(Register r) { + CHECK_NE(r, kNoRegister); + return FromRegId(r); + } + + static Arm64ManagedRegister FromWRegister(WRegister r) { + CHECK_NE(r, kNoWRegister); + return FromRegId(r + kNumberOfCoreRegIds); + } + + static Arm64ManagedRegister FromDRegister(DRegister r) { + CHECK_NE(r, kNoDRegister); + return FromRegId(r + (kNumberOfCoreRegIds + kNumberOfWRegIds)); + } + + static Arm64ManagedRegister FromSRegister(SRegister r) { + CHECK_NE(r, kNoSRegister); + return FromRegId(r + (kNumberOfCoreRegIds + kNumberOfWRegIds + + kNumberOfDRegIds)); + } + + // Returns the X register overlapping W register r. + static Arm64ManagedRegister FromWRegisterCore(WRegister r) { + CHECK_NE(r, kNoWRegister); + return FromRegId(r); + } + + // Return the D register overlapping S register r. 
+ static Arm64ManagedRegister FromSRegisterD(SRegister r) { + CHECK_NE(r, kNoSRegister); + return FromRegId(r + (kNumberOfCoreRegIds + kNumberOfWRegIds)); + } + + private: + bool IsValidManagedRegister() const { + return (0 <= id_) && (id_ < kNumberOfRegIds); + } + + bool IsStackPointer() const { + return IsCoreRegister() && (id_ == SP); + } + + int RegId() const { + CHECK(!IsNoRegister()); + return id_; + } + + int RegNo() const; + int RegIdLow() const; + int RegIdHigh() const; + + friend class ManagedRegister; + + explicit Arm64ManagedRegister(int reg_id) : ManagedRegister(reg_id) {} + + static Arm64ManagedRegister FromRegId(int reg_id) { + Arm64ManagedRegister reg(reg_id); + CHECK(reg.IsValidManagedRegister()); + return reg; + } +}; + +std::ostream& operator<<(std::ostream& os, const Arm64ManagedRegister& reg); + +} // namespace arm64 + +inline arm64::Arm64ManagedRegister ManagedRegister::AsArm64() const { + arm64::Arm64ManagedRegister reg(id_); + CHECK(reg.IsNoRegister() || reg.IsValidManagedRegister()); + return reg; +} + +} // namespace art + +#endif // ART_COMPILER_UTILS_ARM64_MANAGED_REGISTER_ARM64_H_ diff --git a/compiler/utils/arm64/managed_register_arm64_test.cc b/compiler/utils/arm64/managed_register_arm64_test.cc new file mode 100644 index 0000000..3d98e12 --- /dev/null +++ b/compiler/utils/arm64/managed_register_arm64_test.cc @@ -0,0 +1,611 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "globals.h" +#include "managed_register_arm64.h" +#include "gtest/gtest.h" + +namespace art { +namespace arm64 { + +TEST(Arm64ManagedRegister, NoRegister) { + Arm64ManagedRegister reg = ManagedRegister::NoRegister().AsArm64(); + EXPECT_TRUE(reg.IsNoRegister()); + EXPECT_TRUE(!reg.Overlaps(reg)); +} + +// X Register test. 
+TEST(Arm64ManagedRegister, CoreRegister) { + Arm64ManagedRegister reg = Arm64ManagedRegister::FromCoreRegister(X0); + Arm64ManagedRegister wreg = Arm64ManagedRegister::FromWRegister(W0); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(reg.IsCoreRegister()); + EXPECT_TRUE(!reg.IsWRegister()); + EXPECT_TRUE(!reg.IsDRegister()); + EXPECT_TRUE(!reg.IsSRegister()); + EXPECT_TRUE(reg.Overlaps(wreg)); + EXPECT_EQ(X0, reg.AsCoreRegister()); + + reg = Arm64ManagedRegister::FromCoreRegister(X1); + wreg = Arm64ManagedRegister::FromWRegister(W1); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(reg.IsCoreRegister()); + EXPECT_TRUE(!reg.IsWRegister()); + EXPECT_TRUE(!reg.IsDRegister()); + EXPECT_TRUE(!reg.IsSRegister()); + EXPECT_TRUE(reg.Overlaps(wreg)); + EXPECT_EQ(X1, reg.AsCoreRegister()); + + reg = Arm64ManagedRegister::FromCoreRegister(X7); + wreg = Arm64ManagedRegister::FromWRegister(W7); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(reg.IsCoreRegister()); + EXPECT_TRUE(!reg.IsWRegister()); + EXPECT_TRUE(!reg.IsDRegister()); + EXPECT_TRUE(!reg.IsSRegister()); + EXPECT_TRUE(reg.Overlaps(wreg)); + EXPECT_EQ(X7, reg.AsCoreRegister()); + + reg = Arm64ManagedRegister::FromCoreRegister(X15); + wreg = Arm64ManagedRegister::FromWRegister(W15); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(reg.IsCoreRegister()); + EXPECT_TRUE(!reg.IsWRegister()); + EXPECT_TRUE(!reg.IsDRegister()); + EXPECT_TRUE(!reg.IsSRegister()); + EXPECT_TRUE(reg.Overlaps(wreg)); + EXPECT_EQ(X15, reg.AsCoreRegister()); + + reg = Arm64ManagedRegister::FromCoreRegister(X19); + wreg = Arm64ManagedRegister::FromWRegister(W19); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(reg.IsCoreRegister()); + EXPECT_TRUE(!reg.IsWRegister()); + EXPECT_TRUE(!reg.IsDRegister()); + EXPECT_TRUE(!reg.IsSRegister()); + EXPECT_TRUE(reg.Overlaps(wreg)); + EXPECT_EQ(X19, reg.AsCoreRegister()); + + reg = Arm64ManagedRegister::FromCoreRegister(X16); + wreg = Arm64ManagedRegister::FromWRegister(W16); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(reg.IsCoreRegister()); + EXPECT_TRUE(!reg.IsWRegister()); + EXPECT_TRUE(!reg.IsDRegister()); + EXPECT_TRUE(!reg.IsSRegister()); + EXPECT_TRUE(reg.Overlaps(wreg)); + EXPECT_EQ(IP0, reg.AsCoreRegister()); + + reg = Arm64ManagedRegister::FromCoreRegister(SP); + wreg = Arm64ManagedRegister::FromWRegister(WZR); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(reg.IsCoreRegister()); + EXPECT_TRUE(!reg.IsWRegister()); + EXPECT_TRUE(!reg.IsDRegister()); + EXPECT_TRUE(!reg.IsSRegister()); + EXPECT_TRUE(reg.Overlaps(wreg)); + EXPECT_EQ(SP, reg.AsCoreRegister()); +} + +// W register test. 
+TEST(Arm64ManagedRegister, WRegister) { + Arm64ManagedRegister reg = Arm64ManagedRegister::FromWRegister(W0); + Arm64ManagedRegister xreg = Arm64ManagedRegister::FromCoreRegister(X0); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(!reg.IsCoreRegister()); + EXPECT_TRUE(reg.IsWRegister()); + EXPECT_TRUE(!reg.IsDRegister()); + EXPECT_TRUE(!reg.IsSRegister()); + EXPECT_TRUE(reg.Overlaps(xreg)); + EXPECT_EQ(W0, reg.AsWRegister()); + + reg = Arm64ManagedRegister::FromWRegister(W5); + xreg = Arm64ManagedRegister::FromCoreRegister(X5); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(!reg.IsCoreRegister()); + EXPECT_TRUE(reg.IsWRegister()); + EXPECT_TRUE(!reg.IsDRegister()); + EXPECT_TRUE(!reg.IsSRegister()); + EXPECT_TRUE(reg.Overlaps(xreg)); + EXPECT_EQ(W5, reg.AsWRegister()); + + reg = Arm64ManagedRegister::FromWRegister(W6); + xreg = Arm64ManagedRegister::FromCoreRegister(X6); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(!reg.IsCoreRegister()); + EXPECT_TRUE(reg.IsWRegister()); + EXPECT_TRUE(!reg.IsDRegister()); + EXPECT_TRUE(!reg.IsSRegister()); + EXPECT_TRUE(reg.Overlaps(xreg)); + EXPECT_EQ(W6, reg.AsWRegister()); + + reg = Arm64ManagedRegister::FromWRegister(W18); + xreg = Arm64ManagedRegister::FromCoreRegister(X18); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(!reg.IsCoreRegister()); + EXPECT_TRUE(reg.IsWRegister()); + EXPECT_TRUE(!reg.IsDRegister()); + EXPECT_TRUE(!reg.IsSRegister()); + EXPECT_TRUE(reg.Overlaps(xreg)); + EXPECT_EQ(W18, reg.AsWRegister()); + + reg = Arm64ManagedRegister::FromWRegister(W29); + xreg = Arm64ManagedRegister::FromCoreRegister(FP); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(!reg.IsCoreRegister()); + EXPECT_TRUE(reg.IsWRegister()); + EXPECT_TRUE(!reg.IsDRegister()); + EXPECT_TRUE(!reg.IsSRegister()); + EXPECT_TRUE(reg.Overlaps(xreg)); + EXPECT_EQ(W29, reg.AsWRegister()); + + reg = Arm64ManagedRegister::FromWRegister(WZR); + xreg = Arm64ManagedRegister::FromCoreRegister(SP); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(!reg.IsCoreRegister()); + EXPECT_TRUE(reg.IsWRegister()); + EXPECT_TRUE(!reg.IsDRegister()); + EXPECT_TRUE(!reg.IsSRegister()); + EXPECT_TRUE(reg.Overlaps(xreg)); + EXPECT_EQ(W31, reg.AsWRegister()); +} + +// D Register test. 
+TEST(Arm64ManagedRegister, DRegister) { + Arm64ManagedRegister reg = Arm64ManagedRegister::FromDRegister(D0); + Arm64ManagedRegister sreg = Arm64ManagedRegister::FromSRegister(S0); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(!reg.IsCoreRegister()); + EXPECT_TRUE(!reg.IsWRegister()); + EXPECT_TRUE(reg.IsDRegister()); + EXPECT_TRUE(!reg.IsSRegister()); + EXPECT_TRUE(reg.Overlaps(sreg)); + EXPECT_EQ(D0, reg.AsDRegister()); + EXPECT_EQ(S0, reg.AsOverlappingDRegisterLow()); + EXPECT_TRUE(reg.Equals(Arm64ManagedRegister::FromDRegister(D0))); + + reg = Arm64ManagedRegister::FromDRegister(D1); + sreg = Arm64ManagedRegister::FromSRegister(S1); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(!reg.IsCoreRegister()); + EXPECT_TRUE(!reg.IsWRegister()); + EXPECT_TRUE(reg.IsDRegister()); + EXPECT_TRUE(!reg.IsSRegister()); + EXPECT_TRUE(reg.Overlaps(sreg)); + EXPECT_EQ(D1, reg.AsDRegister()); + EXPECT_EQ(S1, reg.AsOverlappingDRegisterLow()); + EXPECT_TRUE(reg.Equals(Arm64ManagedRegister::FromDRegister(D1))); + + reg = Arm64ManagedRegister::FromDRegister(D20); + sreg = Arm64ManagedRegister::FromSRegister(S20); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(!reg.IsCoreRegister()); + EXPECT_TRUE(!reg.IsWRegister()); + EXPECT_TRUE(reg.IsDRegister()); + EXPECT_TRUE(!reg.IsSRegister()); + EXPECT_TRUE(reg.Overlaps(sreg)); + EXPECT_EQ(D20, reg.AsDRegister()); + EXPECT_EQ(S20, reg.AsOverlappingDRegisterLow()); + EXPECT_TRUE(reg.Equals(Arm64ManagedRegister::FromDRegister(D20))); + + reg = Arm64ManagedRegister::FromDRegister(D31); + sreg = Arm64ManagedRegister::FromSRegister(S31); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(!reg.IsCoreRegister()); + EXPECT_TRUE(!reg.IsWRegister()); + EXPECT_TRUE(reg.IsDRegister()); + EXPECT_TRUE(!reg.IsSRegister()); + EXPECT_TRUE(reg.Overlaps(sreg)); + EXPECT_EQ(D31, reg.AsDRegister()); + EXPECT_EQ(S31, reg.AsOverlappingDRegisterLow()); + EXPECT_TRUE(reg.Equals(Arm64ManagedRegister::FromDRegister(D31))); +} + +// S Register test. 
+TEST(Arm64ManagedRegister, SRegister) { + Arm64ManagedRegister reg = Arm64ManagedRegister::FromSRegister(S0); + Arm64ManagedRegister dreg = Arm64ManagedRegister::FromDRegister(D0); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(!reg.IsCoreRegister()); + EXPECT_TRUE(!reg.IsWRegister()); + EXPECT_TRUE(reg.IsSRegister()); + EXPECT_TRUE(!reg.IsDRegister()); + EXPECT_TRUE(reg.Overlaps(dreg)); + EXPECT_EQ(S0, reg.AsSRegister()); + EXPECT_EQ(D0, reg.AsOverlappingSRegisterD()); + EXPECT_TRUE(reg.Equals(Arm64ManagedRegister::FromSRegister(S0))); + + reg = Arm64ManagedRegister::FromSRegister(S5); + dreg = Arm64ManagedRegister::FromDRegister(D5); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(!reg.IsCoreRegister()); + EXPECT_TRUE(!reg.IsWRegister()); + EXPECT_TRUE(reg.IsSRegister()); + EXPECT_TRUE(!reg.IsDRegister()); + EXPECT_TRUE(reg.Overlaps(dreg)); + EXPECT_EQ(S5, reg.AsSRegister()); + EXPECT_EQ(D5, reg.AsOverlappingSRegisterD()); + EXPECT_TRUE(reg.Equals(Arm64ManagedRegister::FromSRegister(S5))); + + reg = Arm64ManagedRegister::FromSRegister(S7); + dreg = Arm64ManagedRegister::FromDRegister(D7); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(!reg.IsCoreRegister()); + EXPECT_TRUE(!reg.IsWRegister()); + EXPECT_TRUE(reg.IsSRegister()); + EXPECT_TRUE(!reg.IsDRegister()); + EXPECT_TRUE(reg.Overlaps(dreg)); + EXPECT_EQ(S7, reg.AsSRegister()); + EXPECT_EQ(D7, reg.AsOverlappingSRegisterD()); + EXPECT_TRUE(reg.Equals(Arm64ManagedRegister::FromSRegister(S7))); + + reg = Arm64ManagedRegister::FromSRegister(S31); + dreg = Arm64ManagedRegister::FromDRegister(D31); + EXPECT_TRUE(!reg.IsNoRegister()); + EXPECT_TRUE(!reg.IsCoreRegister()); + EXPECT_TRUE(!reg.IsWRegister()); + EXPECT_TRUE(reg.IsSRegister()); + EXPECT_TRUE(!reg.IsDRegister()); + EXPECT_TRUE(reg.Overlaps(dreg)); + EXPECT_EQ(S31, reg.AsSRegister()); + EXPECT_EQ(D31, reg.AsOverlappingSRegisterD()); + EXPECT_TRUE(reg.Equals(Arm64ManagedRegister::FromSRegister(S31))); +} + +TEST(Arm64ManagedRegister, Equals) { + ManagedRegister no_reg = ManagedRegister::NoRegister(); + EXPECT_TRUE(no_reg.Equals(Arm64ManagedRegister::NoRegister())); + EXPECT_TRUE(!no_reg.Equals(Arm64ManagedRegister::FromCoreRegister(X0))); + EXPECT_TRUE(!no_reg.Equals(Arm64ManagedRegister::FromCoreRegister(X1))); + EXPECT_TRUE(!no_reg.Equals(Arm64ManagedRegister::FromWRegister(W0))); + EXPECT_TRUE(!no_reg.Equals(Arm64ManagedRegister::FromWRegister(W1))); + EXPECT_TRUE(!no_reg.Equals(Arm64ManagedRegister::FromDRegister(D0))); + EXPECT_TRUE(!no_reg.Equals(Arm64ManagedRegister::FromSRegister(S0))); + + Arm64ManagedRegister reg_X0 = Arm64ManagedRegister::FromCoreRegister(X0); + EXPECT_TRUE(!reg_X0.Equals(Arm64ManagedRegister::NoRegister())); + EXPECT_TRUE(reg_X0.Equals(Arm64ManagedRegister::FromCoreRegister(X0))); + EXPECT_TRUE(!reg_X0.Equals(Arm64ManagedRegister::FromCoreRegister(X1))); + EXPECT_TRUE(!reg_X0.Equals(Arm64ManagedRegister::FromWRegister(W0))); + EXPECT_TRUE(!reg_X0.Equals(Arm64ManagedRegister::FromSRegister(S0))); + EXPECT_TRUE(!reg_X0.Equals(Arm64ManagedRegister::FromDRegister(D0))); + + Arm64ManagedRegister reg_X1 = Arm64ManagedRegister::FromCoreRegister(X1); + EXPECT_TRUE(!reg_X1.Equals(Arm64ManagedRegister::NoRegister())); + EXPECT_TRUE(!reg_X1.Equals(Arm64ManagedRegister::FromCoreRegister(X0))); + EXPECT_TRUE(reg_X1.Equals(Arm64ManagedRegister::FromCoreRegister(X1))); + EXPECT_TRUE(!reg_X1.Equals(Arm64ManagedRegister::FromWRegister(W1))); + EXPECT_TRUE(!reg_X1.Equals(Arm64ManagedRegister::FromDRegister(D0))); + 
EXPECT_TRUE(!reg_X1.Equals(Arm64ManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg_X1.Equals(Arm64ManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(!reg_X1.Equals(Arm64ManagedRegister::FromSRegister(S1)));
+
+ Arm64ManagedRegister reg_X31 = Arm64ManagedRegister::FromCoreRegister(X31);
+ EXPECT_TRUE(!reg_X31.Equals(Arm64ManagedRegister::NoRegister()));
+ EXPECT_TRUE(!reg_X31.Equals(Arm64ManagedRegister::FromCoreRegister(SP)));
+ EXPECT_TRUE(reg_X31.Equals(Arm64ManagedRegister::FromCoreRegister(XZR)));
+ EXPECT_TRUE(!reg_X31.Equals(Arm64ManagedRegister::FromWRegister(W31)));
+ EXPECT_TRUE(!reg_X31.Equals(Arm64ManagedRegister::FromWRegister(WZR)));
+ EXPECT_TRUE(!reg_X31.Equals(Arm64ManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg_X31.Equals(Arm64ManagedRegister::FromDRegister(D0)));
+
+ Arm64ManagedRegister reg_SP = Arm64ManagedRegister::FromCoreRegister(SP);
+ EXPECT_TRUE(!reg_SP.Equals(Arm64ManagedRegister::NoRegister()));
+ // SP has different semantics than X31/XZR, so none of the following
+ // managed registers should compare equal to it.
+ EXPECT_TRUE(!reg_SP.Equals(Arm64ManagedRegister::FromCoreRegister(X31)));
+ EXPECT_TRUE(!reg_SP.Equals(Arm64ManagedRegister::FromCoreRegister(XZR)));
+ EXPECT_TRUE(!reg_SP.Equals(Arm64ManagedRegister::FromWRegister(W31)));
+ EXPECT_TRUE(!reg_SP.Equals(Arm64ManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg_SP.Equals(Arm64ManagedRegister::FromDRegister(D0)));
+
+ Arm64ManagedRegister reg_W8 = Arm64ManagedRegister::FromWRegister(W8);
+ EXPECT_TRUE(!reg_W8.Equals(Arm64ManagedRegister::NoRegister()));
+ EXPECT_TRUE(!reg_W8.Equals(Arm64ManagedRegister::FromCoreRegister(X0)));
+ EXPECT_TRUE(!reg_W8.Equals(Arm64ManagedRegister::FromCoreRegister(X8)));
+ EXPECT_TRUE(reg_W8.Equals(Arm64ManagedRegister::FromWRegister(W8)));
+ EXPECT_TRUE(!reg_W8.Equals(Arm64ManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg_W8.Equals(Arm64ManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg_W8.Equals(Arm64ManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(!reg_W8.Equals(Arm64ManagedRegister::FromSRegister(S1)));
+
+ Arm64ManagedRegister reg_W12 = Arm64ManagedRegister::FromWRegister(W12);
+ EXPECT_TRUE(!reg_W12.Equals(Arm64ManagedRegister::NoRegister()));
+ EXPECT_TRUE(!reg_W12.Equals(Arm64ManagedRegister::FromCoreRegister(X0)));
+ EXPECT_TRUE(!reg_W12.Equals(Arm64ManagedRegister::FromCoreRegister(X8)));
+ EXPECT_TRUE(reg_W12.Equals(Arm64ManagedRegister::FromWRegister(W12)));
+ EXPECT_TRUE(!reg_W12.Equals(Arm64ManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg_W12.Equals(Arm64ManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg_W12.Equals(Arm64ManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(!reg_W12.Equals(Arm64ManagedRegister::FromSRegister(S1)));
+
+ Arm64ManagedRegister reg_S0 = Arm64ManagedRegister::FromSRegister(S0);
+ EXPECT_TRUE(!reg_S0.Equals(Arm64ManagedRegister::NoRegister()));
+ EXPECT_TRUE(!reg_S0.Equals(Arm64ManagedRegister::FromCoreRegister(X0)));
+ EXPECT_TRUE(!reg_S0.Equals(Arm64ManagedRegister::FromCoreRegister(X1)));
+ EXPECT_TRUE(!reg_S0.Equals(Arm64ManagedRegister::FromWRegister(W0)));
+ EXPECT_TRUE(reg_S0.Equals(Arm64ManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg_S0.Equals(Arm64ManagedRegister::FromSRegister(S1)));
+ EXPECT_TRUE(!reg_S0.Equals(Arm64ManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg_S0.Equals(Arm64ManagedRegister::FromDRegister(D1)));
+
+ Arm64ManagedRegister reg_S1 = Arm64ManagedRegister::FromSRegister(S1);
+ EXPECT_TRUE(!reg_S1.Equals(Arm64ManagedRegister::NoRegister()));
+
EXPECT_TRUE(!reg_S1.Equals(Arm64ManagedRegister::FromCoreRegister(X0)));
+ EXPECT_TRUE(!reg_S1.Equals(Arm64ManagedRegister::FromCoreRegister(X1)));
+ EXPECT_TRUE(!reg_S1.Equals(Arm64ManagedRegister::FromWRegister(W0)));
+ EXPECT_TRUE(!reg_S1.Equals(Arm64ManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(reg_S1.Equals(Arm64ManagedRegister::FromSRegister(S1)));
+ EXPECT_TRUE(!reg_S1.Equals(Arm64ManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg_S1.Equals(Arm64ManagedRegister::FromDRegister(D1)));
+
+ Arm64ManagedRegister reg_S31 = Arm64ManagedRegister::FromSRegister(S31);
+ EXPECT_TRUE(!reg_S31.Equals(Arm64ManagedRegister::NoRegister()));
+ EXPECT_TRUE(!reg_S31.Equals(Arm64ManagedRegister::FromCoreRegister(X0)));
+ EXPECT_TRUE(!reg_S31.Equals(Arm64ManagedRegister::FromCoreRegister(X1)));
+ EXPECT_TRUE(!reg_S31.Equals(Arm64ManagedRegister::FromWRegister(W0)));
+ EXPECT_TRUE(!reg_S31.Equals(Arm64ManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(reg_S31.Equals(Arm64ManagedRegister::FromSRegister(S31)));
+ EXPECT_TRUE(!reg_S31.Equals(Arm64ManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg_S31.Equals(Arm64ManagedRegister::FromDRegister(D1)));
+
+ Arm64ManagedRegister reg_D0 = Arm64ManagedRegister::FromDRegister(D0);
+ EXPECT_TRUE(!reg_D0.Equals(Arm64ManagedRegister::NoRegister()));
+ EXPECT_TRUE(!reg_D0.Equals(Arm64ManagedRegister::FromCoreRegister(X0)));
+ EXPECT_TRUE(!reg_D0.Equals(Arm64ManagedRegister::FromWRegister(W1)));
+ EXPECT_TRUE(!reg_D0.Equals(Arm64ManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg_D0.Equals(Arm64ManagedRegister::FromSRegister(S1)));
+ EXPECT_TRUE(!reg_D0.Equals(Arm64ManagedRegister::FromSRegister(S31)));
+ EXPECT_TRUE(reg_D0.Equals(Arm64ManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg_D0.Equals(Arm64ManagedRegister::FromDRegister(D1)));
+
+ Arm64ManagedRegister reg_D15 = Arm64ManagedRegister::FromDRegister(D15);
+ EXPECT_TRUE(!reg_D15.Equals(Arm64ManagedRegister::NoRegister()));
+ EXPECT_TRUE(!reg_D15.Equals(Arm64ManagedRegister::FromCoreRegister(X0)));
+ EXPECT_TRUE(!reg_D15.Equals(Arm64ManagedRegister::FromCoreRegister(X1)));
+ EXPECT_TRUE(!reg_D15.Equals(Arm64ManagedRegister::FromWRegister(W0)));
+ EXPECT_TRUE(!reg_D15.Equals(Arm64ManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg_D15.Equals(Arm64ManagedRegister::FromSRegister(S31)));
+ EXPECT_TRUE(!reg_D15.Equals(Arm64ManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg_D15.Equals(Arm64ManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(reg_D15.Equals(Arm64ManagedRegister::FromDRegister(D15)));
+}
+
+TEST(Arm64ManagedRegister, Overlaps) {
+ Arm64ManagedRegister reg = Arm64ManagedRegister::FromCoreRegister(X0);
+ Arm64ManagedRegister reg_o = Arm64ManagedRegister::FromWRegister(W0);
+ EXPECT_TRUE(reg.Overlaps(Arm64ManagedRegister::FromCoreRegister(X0)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromCoreRegister(X1)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromCoreRegister(SP)));
+ EXPECT_TRUE(reg.Overlaps(Arm64ManagedRegister::FromWRegister(W0)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromWRegister(W1)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromWRegister(W12)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromWRegister(WZR)));
+ EXPECT_EQ(X0, reg_o.AsOverlappingWRegisterCore());
+ EXPECT_EQ(W0, reg.AsOverlappingCoreRegisterLow());
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S1)));
+
EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S2))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S15))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S30))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S31))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromDRegister(D0))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromDRegister(D1))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromDRegister(D7))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromDRegister(D15))); + + reg = Arm64ManagedRegister::FromCoreRegister(X10); + reg_o = Arm64ManagedRegister::FromWRegister(W10); + EXPECT_TRUE(reg.Overlaps(Arm64ManagedRegister::FromCoreRegister(X10))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromCoreRegister(X1))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromCoreRegister(SP))); + EXPECT_TRUE(reg.Overlaps(Arm64ManagedRegister::FromWRegister(W10))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromWRegister(W1))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromWRegister(W12))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromWRegister(WZR))); + EXPECT_EQ(X10, reg_o.AsOverlappingWRegisterCore()); + EXPECT_EQ(W10, reg.AsOverlappingCoreRegisterLow()); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S0))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S1))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S2))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S15))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S30))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S31))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromDRegister(D0))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromDRegister(D1))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromDRegister(D7))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromDRegister(D15))); + + reg = Arm64ManagedRegister::FromCoreRegister(IP1); + reg_o = Arm64ManagedRegister::FromWRegister(W17); + EXPECT_TRUE(reg.Overlaps(Arm64ManagedRegister::FromCoreRegister(X17))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromCoreRegister(X1))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromCoreRegister(SP))); + EXPECT_TRUE(reg.Overlaps(Arm64ManagedRegister::FromWRegister(W17))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromWRegister(W1))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromWRegister(W12))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromWRegister(WZR))); + EXPECT_EQ(X17, reg_o.AsOverlappingWRegisterCore()); + EXPECT_EQ(W17, reg.AsOverlappingCoreRegisterLow()); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S0))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S1))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S2))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S15))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S30))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S31))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromDRegister(D0))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromDRegister(D1))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromDRegister(D7))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromDRegister(D15))); + + reg = Arm64ManagedRegister::FromCoreRegister(XZR); + reg_o = Arm64ManagedRegister::FromWRegister(WZR); + 
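// XZR is the same managed register as X31, and SP shares hardware encoding
+ // 31 with it; so XZR is expected to overlap SP, X31 and W31 below, even
+ // though the Equals test above keeps SP distinct.
+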
EXPECT_TRUE(reg.Overlaps(Arm64ManagedRegister::FromCoreRegister(X31))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromCoreRegister(X1))); + EXPECT_TRUE(reg.Overlaps(Arm64ManagedRegister::FromCoreRegister(SP))); + EXPECT_TRUE(reg.Overlaps(Arm64ManagedRegister::FromWRegister(W31))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromWRegister(W1))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromWRegister(W12))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromWRegister(W19))); + EXPECT_EQ(X31, reg_o.AsOverlappingWRegisterCore()); + EXPECT_EQ(W31, reg.AsOverlappingCoreRegisterLow()); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S0))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S1))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S2))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S15))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S30))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S31))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromDRegister(D0))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromDRegister(D1))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromDRegister(D7))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromDRegister(D15))); + + reg = Arm64ManagedRegister::FromCoreRegister(SP); + reg_o = Arm64ManagedRegister::FromWRegister(WZR); + EXPECT_TRUE(reg.Overlaps(Arm64ManagedRegister::FromCoreRegister(X31))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromCoreRegister(X1))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromCoreRegister(X15))); + EXPECT_TRUE(reg.Overlaps(Arm64ManagedRegister::FromWRegister(WZR))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromWRegister(W1))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromWRegister(W12))); + EXPECT_TRUE(reg.Overlaps(Arm64ManagedRegister::FromWRegister(W31))); + EXPECT_EQ(X31, reg_o.AsOverlappingWRegisterCore()); + EXPECT_EQ(W31, reg.AsOverlappingCoreRegisterLow()); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S0))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S1))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S2))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S15))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S30))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S31))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromDRegister(D0))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromDRegister(D1))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromDRegister(D7))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromDRegister(D15))); + + reg = Arm64ManagedRegister::FromWRegister(W1); + reg_o = Arm64ManagedRegister::FromCoreRegister(X1); + EXPECT_TRUE(reg.Overlaps(Arm64ManagedRegister::FromWRegister(W1))); + EXPECT_TRUE(reg.Overlaps(Arm64ManagedRegister::FromCoreRegister(X1))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromCoreRegister(X15))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromWRegister(WZR))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromWRegister(W12))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromWRegister(W31))); + EXPECT_EQ(W1, reg_o.AsOverlappingCoreRegisterLow()); + EXPECT_EQ(X1, reg.AsOverlappingWRegisterCore()); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S0))); + EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S1))); + 
EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S2)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S15)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S30)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S31)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromDRegister(D7)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromDRegister(D15)));
+
+ reg = Arm64ManagedRegister::FromWRegister(W21);
+ reg_o = Arm64ManagedRegister::FromCoreRegister(X21);
+ EXPECT_TRUE(reg.Overlaps(Arm64ManagedRegister::FromWRegister(W21)));
+ EXPECT_TRUE(reg.Overlaps(Arm64ManagedRegister::FromCoreRegister(X21)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromCoreRegister(X15)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromWRegister(WZR)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromWRegister(W12)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromWRegister(W31)));
+ EXPECT_EQ(W21, reg_o.AsOverlappingCoreRegisterLow());
+ EXPECT_EQ(X21, reg.AsOverlappingWRegisterCore());
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S1)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S2)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S15)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S30)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S31)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromDRegister(D7)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromDRegister(D15)));
+
+ reg = Arm64ManagedRegister::FromSRegister(S1);
+ reg_o = Arm64ManagedRegister::FromDRegister(D1);
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromCoreRegister(X31)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromCoreRegister(X1)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromCoreRegister(X15)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromWRegister(WZR)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromWRegister(W1)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromWRegister(W12)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromWRegister(W31)));
+ EXPECT_EQ(S1, reg_o.AsOverlappingDRegisterLow());
+ EXPECT_EQ(D1, reg.AsOverlappingSRegisterD());
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(reg.Overlaps(Arm64ManagedRegister::FromSRegister(S1)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S2)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S15)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S30)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S31)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(reg.Overlaps(Arm64ManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromDRegister(D2)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromDRegister(D7)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromDRegister(D15)));
+
+ reg = Arm64ManagedRegister::FromSRegister(S15);
+ reg_o = Arm64ManagedRegister::FromDRegister(D15);
+
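// Unlike AArch32, where S15 would be the high half of D7, AArch64 maps S15
+ // onto the low half of D15 only, so D15 is the single D register expected
+ // to overlap it.
+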
EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromCoreRegister(X31)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromCoreRegister(X1)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromCoreRegister(X15)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromWRegister(WZR)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromWRegister(W1)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromWRegister(W12)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromWRegister(W31)));
+ EXPECT_EQ(S15, reg_o.AsOverlappingDRegisterLow());
+ EXPECT_EQ(D15, reg.AsOverlappingSRegisterD());
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(reg.Overlaps(Arm64ManagedRegister::FromSRegister(S15)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S2)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S17)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S16)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S31)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromDRegister(D16)));
+ EXPECT_TRUE(reg.Overlaps(Arm64ManagedRegister::FromDRegister(D15)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromDRegister(D2)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromDRegister(D17)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromDRegister(D20)));
+
+ reg = Arm64ManagedRegister::FromDRegister(D15);
+ reg_o = Arm64ManagedRegister::FromSRegister(S15);
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromCoreRegister(X31)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromCoreRegister(X1)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromCoreRegister(X15)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromWRegister(WZR)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromWRegister(W1)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromWRegister(W12)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromWRegister(W31)));
+ EXPECT_EQ(S15, reg.AsOverlappingDRegisterLow());
+ EXPECT_EQ(D15, reg_o.AsOverlappingSRegisterD());
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(reg.Overlaps(Arm64ManagedRegister::FromSRegister(S15)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S2)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S17)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S16)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromSRegister(S31)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromDRegister(D16)));
+ EXPECT_TRUE(reg.Overlaps(Arm64ManagedRegister::FromDRegister(D15)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromDRegister(D2)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromDRegister(D17)));
+ EXPECT_TRUE(!reg.Overlaps(Arm64ManagedRegister::FromDRegister(D20)));
+}
+
+} // namespace arm64
+} // namespace art
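
Taken together, the Overlaps expectations above encode a simple aliasing rule: the integer file (X/W, where SP, XZR and X31 all use hardware encoding 31) never aliases the floating-point file (S/D), and within a file two registers overlap exactly when they share an index. The standalone sketch below restates that rule as a predicate; Reg, Kind and Overlaps are hypothetical names chosen for illustration, not ART's Arm64ManagedRegister API, and the model deliberately ignores the Equals-level distinction between SP and XZR.

// Illustrative model of the overlap rules the tests above pin down.
// Assumption: ids are hardware encodings, so SP and XZR/X31 share id 31.
#include <cassert>

enum class Kind { kCore, kW, kS, kD };  // X/W: integer file; S/D: FP file.

struct Reg {
  Kind kind;
  int id;  // Hardware encoding, 0..31.
};

static bool IsFP(Reg r) { return r.kind == Kind::kS || r.kind == Kind::kD; }

bool Overlaps(Reg a, Reg b) {
  if (IsFP(a) != IsFP(b)) return false;  // X0 never overlaps S0/D0.
  return a.id == b.id;                   // Xn~Wn and Sn~Dn alias pairwise.
}

int main() {
  assert(Overlaps({Kind::kCore, 0}, {Kind::kW, 0}));    // X0 overlaps W0.
  assert(!Overlaps({Kind::kCore, 0}, {Kind::kD, 0}));   // X0 vs D0: no.
  assert(Overlaps({Kind::kCore, 31}, {Kind::kW, 31}));  // SP/XZR vs W31.
  assert(Overlaps({Kind::kS, 15}, {Kind::kD, 15}));     // S15 is D15's low half.
  assert(!Overlaps({Kind::kS, 15}, {Kind::kD, 16}));    // No cross-index aliasing.
  return 0;
}

Keeping Equals strict (SP distinct from X31/XZR) while letting Overlaps compare encodings, as the tests require, is what allows the register allocator to treat the stack pointer specially without ever assigning a register that clobbers it.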