author     Ian Rogers <irogers@google.com>   2014-03-14 17:43:00 -0700
committer  Ian Rogers <irogers@google.com>   2014-04-01 08:24:16 -0700
commit     dd7624d2b9e599d57762d12031b10b89defc9807 (patch)
tree       c972296737f992a84b1552561f823991d28403f0 /compiler/utils/x86
parent     8464a64a50190c06e95015a932eda9511fa6473d (diff)
Allow mixing of thread offsets between 32-bit and 64-bit architectures.
Begin a fuller implementation of x86-64 REX prefixes.
Doesn't implement 64-bit thread offset support for the JNI compiler.
Change-Id: If9af2f08a1833c21ddb4b4077f9b03add1a05147
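
The core idea is that a thread offset now carries the target pointer size in its type, so 32-bit and 64-bit offsets cannot be mixed by accident, and per-size entry points such as LoadFromThread32 can state their requirement in the signature. A minimal stand-alone sketch of that pattern, assuming simplified names and members (the real type lives elsewhere in the runtime and differs in detail):

#include <cstdint>
#include <cstddef>
#include <iostream>

// Hypothetical, simplified stand-in for a size-parameterized ThreadOffset.
// The pointer size is part of the type, so ThreadOffset<4> and ThreadOffset<8>
// are distinct, non-convertible types.
template <size_t kPointerSize>
class ThreadOffset {
 public:
  constexpr explicit ThreadOffset(int32_t value) : value_(value) {}
  constexpr int32_t Int32Value() const { return value_; }

 private:
  int32_t value_;
};

// A 32-bit-only helper, mirroring the *Thread32 naming used in this change.
void LoadFromThread32(ThreadOffset<4> src) {
  std::cout << "load from fs:[" << src.Int32Value() << "]\n";
}

int main() {
  ThreadOffset<4> self_offset(80);  // made-up offset value, for illustration
  LoadFromThread32(self_offset);
  // ThreadOffset<8> wide_offset(160);
  // LoadFromThread32(wide_offset);  // would not compile: wrong pointer size
  return 0;
}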
Diffstat (limited to 'compiler/utils/x86')
-rw-r--r--  compiler/utils/x86/assembler_x86.cc |  36
-rw-r--r--  compiler/utils/x86/assembler_x86.h  | 148
2 files changed, 80 insertions, 104 deletions
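
Two mechanical themes run through the header diff below: the thread-related methods gain a 32 suffix and a ThreadOffset<4> parameter, and the JNI-interface declarations drop `virtual` in favour of an OVERRIDE annotation (with the slow-path class additionally marked FINAL), so the compiler checks that each declaration really overrides something in the base Assembler. A small sketch of why that matters for a rename like this one, using hypothetical trimmed-down types and assuming OVERRIDE simply expands to the C++11 override specifier:

#include <cstddef>
#include <cstdint>

#define OVERRIDE override  // assumed expansion: a portability macro for C++11 'override'

template <size_t kPointerSize>
class ThreadOffset {
 public:
  constexpr explicit ThreadOffset(int32_t v) : value_(v) {}
 private:
  int32_t value_;
};

// Trimmed-down stand-in for the base assembler interface.
class Assembler {
 public:
  virtual ~Assembler() {}
  virtual void LoadFromThread32(ThreadOffset<4> src, size_t size) = 0;
};

class X86Assembler : public Assembler {
 public:
  // OVERRIDE forces the compiler to verify this matches a base-class virtual;
  // keeping the old name or parameter type here would be a compile error
  // rather than a silently unrelated method.
  void LoadFromThread32(ThreadOffset<4> src, size_t size) OVERRIDE {
    (void)src;
    (void)size;  // code emission elided in this sketch
  }
};

int main() {
  X86Assembler assembler;
  assembler.LoadFromThread32(ThreadOffset<4>(80), 4);  // 80: made-up offset
  return 0;
}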
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index ebbb43a..aac8b01 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -1478,12 +1478,12 @@ void X86Assembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
   movl(Address(ESP, dest), Immediate(imm));
 }
 
-void X86Assembler::StoreImmediateToThread(ThreadOffset dest, uint32_t imm,
+void X86Assembler::StoreImmediateToThread32(ThreadOffset<4> dest, uint32_t imm,
                                           ManagedRegister) {
   fs()->movl(Address::Absolute(dest), Immediate(imm));
 }
 
-void X86Assembler::StoreStackOffsetToThread(ThreadOffset thr_offs,
+void X86Assembler::StoreStackOffsetToThread32(ThreadOffset<4> thr_offs,
                                             FrameOffset fr_offs,
                                             ManagedRegister mscratch) {
   X86ManagedRegister scratch = mscratch.AsX86();
@@ -1492,14 +1492,10 @@ void X86Assembler::StoreStackOffsetToThread(ThreadOffset thr_offs,
   fs()->movl(Address::Absolute(thr_offs), scratch.AsCpuRegister());
 }
 
-void X86Assembler::StoreStackPointerToThread(ThreadOffset thr_offs) {
+void X86Assembler::StoreStackPointerToThread32(ThreadOffset<4> thr_offs) {
   fs()->movl(Address::Absolute(thr_offs), ESP);
 }
 
-void X86Assembler::StoreLabelToThread(ThreadOffset thr_offs, Label* lbl) {
-  fs()->movl(Address::Absolute(thr_offs), lbl);
-}
-
 void X86Assembler::StoreSpanning(FrameOffset /*dst*/, ManagedRegister /*src*/,
                                  FrameOffset /*in_off*/, ManagedRegister /*scratch*/) {
   UNIMPLEMENTED(FATAL);  // this case only currently exists for ARM
@@ -1532,7 +1528,7 @@ void X86Assembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) {
   }
 }
 
-void X86Assembler::Load(ManagedRegister mdest, ThreadOffset src, size_t size) {
+void X86Assembler::LoadFromThread32(ManagedRegister mdest, ThreadOffset<4> src, size_t size) {
   X86ManagedRegister dest = mdest.AsX86();
   if (dest.IsNoRegister()) {
     CHECK_EQ(0u, size);
@@ -1542,7 +1538,7 @@ void X86Assembler::Load(ManagedRegister mdest, ThreadOffset src, size_t size) {
   } else if (dest.IsRegisterPair()) {
     CHECK_EQ(8u, size);
     fs()->movl(dest.AsRegisterPairLow(), Address::Absolute(src));
-    fs()->movl(dest.AsRegisterPairHigh(), Address::Absolute(ThreadOffset(src.Int32Value()+4)));
+    fs()->movl(dest.AsRegisterPairHigh(), Address::Absolute(ThreadOffset<4>(src.Int32Value()+4)));
   } else if (dest.IsX87Register()) {
     if (size == 4) {
       fs()->flds(Address::Absolute(src));
@@ -1582,8 +1578,8 @@ void X86Assembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base,
   movl(dest.AsCpuRegister(), Address(base.AsX86().AsCpuRegister(), offs));
 }
 
-void X86Assembler::LoadRawPtrFromThread(ManagedRegister mdest,
-                                        ThreadOffset offs) {
+void X86Assembler::LoadRawPtrFromThread32(ManagedRegister mdest,
+                                          ThreadOffset<4> offs) {
   X86ManagedRegister dest = mdest.AsX86();
   CHECK(dest.IsCpuRegister());
   fs()->movl(dest.AsCpuRegister(), Address::Absolute(offs));
@@ -1645,8 +1641,8 @@ void X86Assembler::CopyRef(FrameOffset dest, FrameOffset src,
   movl(Address(ESP, dest), scratch.AsCpuRegister());
 }
 
-void X86Assembler::CopyRawPtrFromThread(FrameOffset fr_offs,
-                                        ThreadOffset thr_offs,
+void X86Assembler::CopyRawPtrFromThread32(FrameOffset fr_offs,
+                                          ThreadOffset<4> thr_offs,
                                           ManagedRegister mscratch) {
   X86ManagedRegister scratch = mscratch.AsX86();
   CHECK(scratch.IsCpuRegister());
@@ -1654,7 +1650,7 @@ void X86Assembler::CopyRawPtrFromThread(FrameOffset fr_offs,
   Store(fr_offs, scratch, 4);
 }
 
-void X86Assembler::CopyRawPtrToThread(ThreadOffset thr_offs,
+void X86Assembler::CopyRawPtrToThread32(ThreadOffset<4> thr_offs,
                                       FrameOffset fr_offs,
                                       ManagedRegister mscratch) {
   X86ManagedRegister scratch = mscratch.AsX86();
@@ -1804,26 +1800,26 @@ void X86Assembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratc
   call(Address(scratch, offset));
 }
 
-void X86Assembler::Call(ThreadOffset offset, ManagedRegister /*mscratch*/) {
+void X86Assembler::CallFromThread32(ThreadOffset<4> offset, ManagedRegister /*mscratch*/) {
   fs()->call(Address::Absolute(offset));
 }
 
 void X86Assembler::GetCurrentThread(ManagedRegister tr) {
   fs()->movl(tr.AsX86().AsCpuRegister(),
-             Address::Absolute(Thread::SelfOffset()));
+             Address::Absolute(Thread::SelfOffset<4>()));
 }
 
 void X86Assembler::GetCurrentThread(FrameOffset offset,
                                     ManagedRegister mscratch) {
   X86ManagedRegister scratch = mscratch.AsX86();
-  fs()->movl(scratch.AsCpuRegister(), Address::Absolute(Thread::SelfOffset()));
+  fs()->movl(scratch.AsCpuRegister(), Address::Absolute(Thread::SelfOffset<4>()));
   movl(Address(ESP, offset), scratch.AsCpuRegister());
 }
 
 void X86Assembler::ExceptionPoll(ManagedRegister /*scratch*/, size_t stack_adjust) {
   X86ExceptionSlowPath* slow = new X86ExceptionSlowPath(stack_adjust);
   buffer_.EnqueueSlowPath(slow);
-  fs()->cmpl(Address::Absolute(Thread::ExceptionOffset()), Immediate(0));
+  fs()->cmpl(Address::Absolute(Thread::ExceptionOffset<4>()), Immediate(0));
   j(kNotEqual, slow->Entry());
 }
 
@@ -1836,8 +1832,8 @@ void X86ExceptionSlowPath::Emit(Assembler *sasm) {
     __ DecreaseFrameSize(stack_adjust_);
   }
   // Pass exception as argument in EAX
-  __ fs()->movl(EAX, Address::Absolute(Thread::ExceptionOffset()));
-  __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(pDeliverException)));
+  __ fs()->movl(EAX, Address::Absolute(Thread::ExceptionOffset<4>()));
+  __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(4, pDeliverException)));
   // this call should never return
   __ int3();
 #undef __
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index f906a6f..f8fc4c0 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -117,7 +117,6 @@ class Operand {
  private:
   byte length_;
   byte encoding_[6];
-  byte padding_;
 
   explicit Operand(Register reg) { SetModRM(3, reg); }
 
@@ -192,21 +191,15 @@ class Address : public Operand {
     }
   }
 
-  static Address Absolute(uword addr, bool has_rip = false) {
+  static Address Absolute(uword addr) {
     Address result;
-    if (has_rip) {
-      result.SetModRM(0, ESP);
-      result.SetSIB(TIMES_1, ESP, EBP);
-      result.SetDisp32(addr);
-    } else {
-      result.SetModRM(0, EBP);
-      result.SetDisp32(addr);
-    }
+    result.SetModRM(0, EBP);
+    result.SetDisp32(addr);
     return result;
   }
 
-  static Address Absolute(ThreadOffset addr, bool has_rip = false) {
-    return Absolute(addr.Int32Value(), has_rip);
+  static Address Absolute(ThreadOffset<4> addr) {
+    return Absolute(addr.Int32Value());
   }
 
  private:
@@ -465,129 +458,116 @@ class X86Assembler FINAL : public Assembler {
   //
 
   // Emit code that will create an activation on the stack
-  virtual void BuildFrame(size_t frame_size, ManagedRegister method_reg,
-                          const std::vector<ManagedRegister>& callee_save_regs,
-                          const ManagedRegisterEntrySpills& entry_spills);
+  void BuildFrame(size_t frame_size, ManagedRegister method_reg,
+                  const std::vector<ManagedRegister>& callee_save_regs,
+                  const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
 
   // Emit code that will remove an activation from the stack
-  virtual void RemoveFrame(size_t frame_size,
-                           const std::vector<ManagedRegister>& callee_save_regs);
+  void RemoveFrame(size_t frame_size, const std::vector<ManagedRegister>& callee_save_regs)
+      OVERRIDE;
 
-  virtual void IncreaseFrameSize(size_t adjust);
-  virtual void DecreaseFrameSize(size_t adjust);
+  void IncreaseFrameSize(size_t adjust) OVERRIDE;
+  void DecreaseFrameSize(size_t adjust) OVERRIDE;
 
   // Store routines
-  virtual void Store(FrameOffset offs, ManagedRegister src, size_t size);
-  virtual void StoreRef(FrameOffset dest, ManagedRegister src);
-  virtual void StoreRawPtr(FrameOffset dest, ManagedRegister src);
-
-  virtual void StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
-                                     ManagedRegister scratch);
+  void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE;
+  void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
+  void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
 
-  virtual void StoreImmediateToThread(ThreadOffset dest, uint32_t imm,
-                                      ManagedRegister scratch);
+  void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
 
-  virtual void StoreStackOffsetToThread(ThreadOffset thr_offs,
-                                        FrameOffset fr_offs,
-                                        ManagedRegister scratch);
+  void StoreImmediateToThread32(ThreadOffset<4> dest, uint32_t imm, ManagedRegister scratch)
+      OVERRIDE;
 
-  virtual void StoreStackPointerToThread(ThreadOffset thr_offs);
+  void StoreStackOffsetToThread32(ThreadOffset<4> thr_offs, FrameOffset fr_offs,
+                                  ManagedRegister scratch) OVERRIDE;
 
-  void StoreLabelToThread(ThreadOffset thr_offs, Label* lbl);
+  void StoreStackPointerToThread32(ThreadOffset<4> thr_offs) OVERRIDE;
 
-  virtual void StoreSpanning(FrameOffset dest, ManagedRegister src,
-                             FrameOffset in_off, ManagedRegister scratch);
+  void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off,
+                     ManagedRegister scratch) OVERRIDE;
 
   // Load routines
-  virtual void Load(ManagedRegister dest, FrameOffset src, size_t size);
+  void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
 
-  virtual void Load(ManagedRegister dest, ThreadOffset src, size_t size);
+  void LoadFromThread32(ManagedRegister dest, ThreadOffset<4> src, size_t size) OVERRIDE;
 
-  virtual void LoadRef(ManagedRegister dest, FrameOffset src);
+  void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
 
-  virtual void LoadRef(ManagedRegister dest, ManagedRegister base,
-                       MemberOffset offs);
+  void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs) OVERRIDE;
 
-  virtual void LoadRawPtr(ManagedRegister dest, ManagedRegister base,
-                          Offset offs);
+  void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
 
-  virtual void LoadRawPtrFromThread(ManagedRegister dest,
-                                    ThreadOffset offs);
+  void LoadRawPtrFromThread32(ManagedRegister dest, ThreadOffset<4> offs) OVERRIDE;
 
   // Copying routines
-  virtual void Move(ManagedRegister dest, ManagedRegister src, size_t size);
+  void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;
 
-  virtual void CopyRawPtrFromThread(FrameOffset fr_offs, ThreadOffset thr_offs,
-                                    ManagedRegister scratch);
+  void CopyRawPtrFromThread32(FrameOffset fr_offs, ThreadOffset<4> thr_offs,
                                ManagedRegister scratch) OVERRIDE;
 
-  virtual void CopyRawPtrToThread(ThreadOffset thr_offs, FrameOffset fr_offs,
-                                  ManagedRegister scratch);
+  void CopyRawPtrToThread32(ThreadOffset<4> thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
+      OVERRIDE;
 
-  virtual void CopyRef(FrameOffset dest, FrameOffset src,
-                       ManagedRegister scratch);
+  void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
 
-  virtual void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size);
+  void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
 
-  virtual void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
-                    ManagedRegister scratch, size_t size);
+  void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, ManagedRegister scratch,
            size_t size) OVERRIDE;
 
-  virtual void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
-                    ManagedRegister scratch, size_t size);
+  void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src, ManagedRegister scratch,
            size_t size) OVERRIDE;
 
-  virtual void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset,
-                    ManagedRegister scratch, size_t size);
+  void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, ManagedRegister scratch,
            size_t size) OVERRIDE;
 
-  virtual void Copy(ManagedRegister dest, Offset dest_offset,
-                    ManagedRegister src, Offset src_offset,
-                    ManagedRegister scratch, size_t size);
+  void Copy(ManagedRegister dest, Offset dest_offset, ManagedRegister src, Offset src_offset,
            ManagedRegister scratch, size_t size) OVERRIDE;
 
-  virtual void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
-                    ManagedRegister scratch, size_t size);
+  void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
            ManagedRegister scratch, size_t size) OVERRIDE;
 
-  virtual void MemoryBarrier(ManagedRegister);
+  void MemoryBarrier(ManagedRegister) OVERRIDE;
 
   // Sign extension
-  virtual void SignExtend(ManagedRegister mreg, size_t size);
+  void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
 
   // Zero extension
-  virtual void ZeroExtend(ManagedRegister mreg, size_t size);
+  void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
 
   // Exploit fast access in managed code to Thread::Current()
-  virtual void GetCurrentThread(ManagedRegister tr);
-  virtual void GetCurrentThread(FrameOffset dest_offset,
-                                ManagedRegister scratch);
+  void GetCurrentThread(ManagedRegister tr) OVERRIDE;
+  void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
 
   // Set up out_reg to hold a Object** into the SIRT, or to be NULL if the
   // value is null and null_allowed. in_reg holds a possibly stale reference
   // that can be used to avoid loading the SIRT entry to see if the value is
   // NULL.
-  virtual void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset,
-                               ManagedRegister in_reg, bool null_allowed);
+  void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset, ManagedRegister in_reg,
                       bool null_allowed) OVERRIDE;
 
   // Set up out_off to hold a Object** into the SIRT, or to be NULL if the
   // value is null and null_allowed.
-  virtual void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset,
-                               ManagedRegister scratch, bool null_allowed);
+  void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset, ManagedRegister scratch,
                       bool null_allowed) OVERRIDE;
 
   // src holds a SIRT entry (Object**) load this into dst
-  virtual void LoadReferenceFromSirt(ManagedRegister dst,
-                                     ManagedRegister src);
+  void LoadReferenceFromSirt(ManagedRegister dst, ManagedRegister src) OVERRIDE;
 
   // Heap::VerifyObject on src. In some cases (such as a reference to this) we
   // know that src may not be null.
-  virtual void VerifyObject(ManagedRegister src, bool could_be_null);
-  virtual void VerifyObject(FrameOffset src, bool could_be_null);
+  void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
+  void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
 
   // Call to address held at [base+offset]
-  virtual void Call(ManagedRegister base, Offset offset,
-                    ManagedRegister scratch);
-  virtual void Call(FrameOffset base, Offset offset,
-                    ManagedRegister scratch);
-  virtual void Call(ThreadOffset offset, ManagedRegister scratch);
+  void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
+  void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
+  void CallFromThread32(ThreadOffset<4> offset, ManagedRegister scratch) OVERRIDE;
 
   // Generate code to check if Thread::Current()->exception_ is non-null
   // and branch to a ExceptionSlowPath if it is.
-  virtual void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust);
+  void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
 
  private:
   inline void EmitUint8(uint8_t value);
@@ -637,10 +617,10 @@ inline void X86Assembler::EmitOperandSizeOverride() {
 }
 
 // Slowpath entered when Thread::Current()->_exception is non-null
-class X86ExceptionSlowPath : public SlowPath {
+class X86ExceptionSlowPath FINAL : public SlowPath {
  public:
   explicit X86ExceptionSlowPath(size_t stack_adjust) : stack_adjust_(stack_adjust) {}
-  virtual void Emit(Assembler *sp_asm);
+  virtual void Emit(Assembler *sp_asm) OVERRIDE;
  private:
   const size_t stack_adjust_;
 };
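
A note on the simplified Address::Absolute() above: on 32-bit x86, a ModRM byte with mod = 0 and r/m = 101 (the EBP encoding) selects a bare disp32 operand, which is why SetModRM(0, EBP) followed by SetDisp32(addr) is all that is needed; combined with the fs segment override emitted for fs()-> accesses, this yields the %fs:disp32 form used for thread-local state. A minimal sketch of that raw encoding, independent of the ART assembler classes (the helper name and the offset value are illustrative only):

#include <cstdint>
#include <cstdio>
#include <vector>

// Sketch: raw bytes for "mov eax, fs:[disp32]" -- fs prefix, opcode,
// ModRM(mod=00, reg=EAX, r/m=101 = disp32), then the little-endian displacement.
// Not ART code; it just spells out the encoding rule the diff relies on.
std::vector<uint8_t> MovEaxFromFsAbsolute(uint32_t disp32) {
  std::vector<uint8_t> out;
  out.push_back(0x64);  // fs segment-override prefix
  out.push_back(0x8B);  // mov r32, r/m32
  out.push_back(0x05);  // ModRM: mod=00, reg=000 (EAX), r/m=101 (bare disp32)
  for (int i = 0; i < 4; ++i) {
    out.push_back(static_cast<uint8_t>(disp32 >> (8 * i)));
  }
  return out;
}

int main() {
  for (uint8_t b : MovEaxFromFsAbsolute(0x50)) {  // 0x50: made-up thread offset
    std::printf("%02x ", b);
  }
  std::printf("\n");  // prints: 64 8b 05 50 00 00 00
  return 0;
}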