Diffstat (limited to 'lib/Target/AArch64/AArch64ISelLowering.cpp')
-rw-r--r-- | lib/Target/AArch64/AArch64ISelLowering.cpp | 11169
1 file changed, 6854 insertions, 4315 deletions
diff --git a/lib/Target/AArch64/AArch64ISelLowering.cpp b/lib/Target/AArch64/AArch64ISelLowering.cpp index 388973a..80d6669 100644 --- a/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -1,4 +1,4 @@ -//===-- AArch64ISelLowering.cpp - AArch64 DAG Lowering Implementation -----===// +//===-- AArch64ISelLowering.cpp - AArch64 DAG Lowering Implementation ----===// // // The LLVM Compiler Infrastructure // @@ -7,46 +7,87 @@ // //===----------------------------------------------------------------------===// // -// This file defines the interfaces that AArch64 uses to lower LLVM code into a -// selection DAG. +// This file implements the AArch64TargetLowering class. // //===----------------------------------------------------------------------===// -#define DEBUG_TYPE "aarch64-isel" -#include "AArch64.h" #include "AArch64ISelLowering.h" +#include "AArch64PerfectShuffle.h" +#include "AArch64Subtarget.h" #include "AArch64MachineFunctionInfo.h" #include "AArch64TargetMachine.h" #include "AArch64TargetObjectFile.h" -#include "Utils/AArch64BaseInfo.h" -#include "llvm/CodeGen/Analysis.h" +#include "MCTargetDesc/AArch64AddressingModes.h" +#include "llvm/ADT/Statistic.h" #include "llvm/CodeGen/CallingConvLower.h" #include "llvm/CodeGen/MachineFrameInfo.h" #include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/MachineRegisterInfo.h" -#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h" -#include "llvm/IR/CallingConv.h" -#include "llvm/Support/MathExtras.h" - +#include "llvm/IR/Function.h" +#include "llvm/IR/Intrinsics.h" +#include "llvm/IR/Type.h" +#include "llvm/Support/CommandLine.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/raw_ostream.h" +#include "llvm/Target/TargetOptions.h" using namespace llvm; -static TargetLoweringObjectFile *createTLOF(AArch64TargetMachine &TM) { - assert (TM.getSubtarget<AArch64Subtarget>().isTargetELF() && - "unknown subtarget type"); - return new AArch64ElfTargetObjectFile(); -} +#define DEBUG_TYPE "aarch64-lower" -AArch64TargetLowering::AArch64TargetLowering(AArch64TargetMachine &TM) - : TargetLowering(TM, createTLOF(TM)), Itins(TM.getInstrItineraryData()) { +STATISTIC(NumTailCalls, "Number of tail calls"); +STATISTIC(NumShiftInserts, "Number of vector shift inserts"); + +enum AlignMode { + StrictAlign, + NoStrictAlign +}; + +static cl::opt<AlignMode> +Align(cl::desc("Load/store alignment support"), + cl::Hidden, cl::init(NoStrictAlign), + cl::values( + clEnumValN(StrictAlign, "aarch64-strict-align", + "Disallow all unaligned memory accesses"), + clEnumValN(NoStrictAlign, "aarch64-no-strict-align", + "Allow unaligned memory accesses"), + clEnumValEnd)); + +// Place holder until extr generation is tested fully. +static cl::opt<bool> +EnableAArch64ExtrGeneration("aarch64-extr-generation", cl::Hidden, + cl::desc("Allow AArch64 (or (shift)(shift))->extract"), + cl::init(true)); + +static cl::opt<bool> +EnableAArch64SlrGeneration("aarch64-shift-insert-generation", cl::Hidden, + cl::desc("Allow AArch64 SLI/SRI formation"), + cl::init(false)); - const AArch64Subtarget *Subtarget = &TM.getSubtarget<AArch64Subtarget>(); +//===----------------------------------------------------------------------===// +// AArch64 Lowering public interface. 
+//===----------------------------------------------------------------------===// +static TargetLoweringObjectFile *createTLOF(TargetMachine &TM) { + if (TM.getSubtarget<AArch64Subtarget>().isTargetDarwin()) + return new AArch64_MachoTargetObjectFile(); + + return new AArch64_ELFTargetObjectFile(); +} - // SIMD compares set the entire lane's bits to 1 +AArch64TargetLowering::AArch64TargetLowering(AArch64TargetMachine &TM) + : TargetLowering(TM, createTLOF(TM)) { + Subtarget = &TM.getSubtarget<AArch64Subtarget>(); + + // AArch64 doesn't have comparisons which set GPRs or setcc instructions, so + // we have to make something up. Arbitrarily, choose ZeroOrOne. + setBooleanContents(ZeroOrOneBooleanContent); + // When comparing vectors the result sets the different elements in the + // vector to all-one or all-zero. setBooleanVectorContents(ZeroOrNegativeOneBooleanContent); - // Scalar register <-> type mapping - addRegisterClass(MVT::i32, &AArch64::GPR32RegClass); - addRegisterClass(MVT::i64, &AArch64::GPR64RegClass); + // Set up the register classes. + addRegisterClass(MVT::i32, &AArch64::GPR32allRegClass); + addRegisterClass(MVT::i64, &AArch64::GPR64allRegClass); if (Subtarget->hasFPARMv8()) { addRegisterClass(MVT::f16, &AArch64::FPR16RegClass); @@ -56,201 +97,86 @@ AArch64TargetLowering::AArch64TargetLowering(AArch64TargetMachine &TM) } if (Subtarget->hasNEON()) { - // And the vectors - addRegisterClass(MVT::v1i8, &AArch64::FPR8RegClass); - addRegisterClass(MVT::v1i16, &AArch64::FPR16RegClass); - addRegisterClass(MVT::v1i32, &AArch64::FPR32RegClass); - addRegisterClass(MVT::v1i64, &AArch64::FPR64RegClass); - addRegisterClass(MVT::v1f64, &AArch64::FPR64RegClass); - addRegisterClass(MVT::v8i8, &AArch64::FPR64RegClass); - addRegisterClass(MVT::v4i16, &AArch64::FPR64RegClass); - addRegisterClass(MVT::v2i32, &AArch64::FPR64RegClass); - addRegisterClass(MVT::v1i64, &AArch64::FPR64RegClass); - addRegisterClass(MVT::v2f32, &AArch64::FPR64RegClass); - addRegisterClass(MVT::v16i8, &AArch64::FPR128RegClass); - addRegisterClass(MVT::v8i16, &AArch64::FPR128RegClass); - addRegisterClass(MVT::v4i32, &AArch64::FPR128RegClass); - addRegisterClass(MVT::v2i64, &AArch64::FPR128RegClass); - addRegisterClass(MVT::v4f32, &AArch64::FPR128RegClass); - addRegisterClass(MVT::v2f64, &AArch64::FPR128RegClass); + addRegisterClass(MVT::v16i8, &AArch64::FPR8RegClass); + addRegisterClass(MVT::v8i16, &AArch64::FPR16RegClass); + // Someone set us up the NEON. + addDRTypeForNEON(MVT::v2f32); + addDRTypeForNEON(MVT::v8i8); + addDRTypeForNEON(MVT::v4i16); + addDRTypeForNEON(MVT::v2i32); + addDRTypeForNEON(MVT::v1i64); + addDRTypeForNEON(MVT::v1f64); + + addQRTypeForNEON(MVT::v4f32); + addQRTypeForNEON(MVT::v2f64); + addQRTypeForNEON(MVT::v16i8); + addQRTypeForNEON(MVT::v8i16); + addQRTypeForNEON(MVT::v4i32); + addQRTypeForNEON(MVT::v2i64); } + // Compute derived properties from the register classes computeRegisterProperties(); - // We combine OR nodes for bitfield and NEON BSL operations. - setTargetDAGCombine(ISD::OR); - - setTargetDAGCombine(ISD::AND); - setTargetDAGCombine(ISD::SRA); - setTargetDAGCombine(ISD::SRL); - setTargetDAGCombine(ISD::SHL); - - setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN); - setTargetDAGCombine(ISD::INTRINSIC_VOID); - setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN); - - // AArch64 does not have i1 loads, or much of anything for i1 really. 
- setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote); - setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote); - setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote); - - setStackPointerRegisterToSaveRestore(AArch64::XSP); - setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand); - setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); - setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); - - // We'll lower globals to wrappers for selection. + // Provide all sorts of operation actions setOperationAction(ISD::GlobalAddress, MVT::i64, Custom); setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom); - - // A64 instructions have the comparison predicate attached to the user of the - // result, but having a separate comparison is valuable for matching. + setOperationAction(ISD::SETCC, MVT::i32, Custom); + setOperationAction(ISD::SETCC, MVT::i64, Custom); + setOperationAction(ISD::SETCC, MVT::f32, Custom); + setOperationAction(ISD::SETCC, MVT::f64, Custom); + setOperationAction(ISD::BRCOND, MVT::Other, Expand); setOperationAction(ISD::BR_CC, MVT::i32, Custom); setOperationAction(ISD::BR_CC, MVT::i64, Custom); setOperationAction(ISD::BR_CC, MVT::f32, Custom); setOperationAction(ISD::BR_CC, MVT::f64, Custom); - setOperationAction(ISD::SELECT, MVT::i32, Custom); setOperationAction(ISD::SELECT, MVT::i64, Custom); setOperationAction(ISD::SELECT, MVT::f32, Custom); setOperationAction(ISD::SELECT, MVT::f64, Custom); - setOperationAction(ISD::SELECT_CC, MVT::i32, Custom); setOperationAction(ISD::SELECT_CC, MVT::i64, Custom); setOperationAction(ISD::SELECT_CC, MVT::f32, Custom); setOperationAction(ISD::SELECT_CC, MVT::f64, Custom); - - setOperationAction(ISD::BRCOND, MVT::Other, Custom); - - setOperationAction(ISD::SETCC, MVT::i32, Custom); - setOperationAction(ISD::SETCC, MVT::i64, Custom); - setOperationAction(ISD::SETCC, MVT::f32, Custom); - setOperationAction(ISD::SETCC, MVT::f64, Custom); - setOperationAction(ISD::BR_JT, MVT::Other, Expand); - setOperationAction(ISD::JumpTable, MVT::i32, Custom); setOperationAction(ISD::JumpTable, MVT::i64, Custom); - setOperationAction(ISD::VASTART, MVT::Other, Custom); - setOperationAction(ISD::VACOPY, MVT::Other, Custom); - setOperationAction(ISD::VAEND, MVT::Other, Expand); - setOperationAction(ISD::VAARG, MVT::Other, Expand); - - setOperationAction(ISD::BlockAddress, MVT::i64, Custom); - setOperationAction(ISD::ConstantPool, MVT::i64, Custom); - - setOperationAction(ISD::ROTL, MVT::i32, Expand); - setOperationAction(ISD::ROTL, MVT::i64, Expand); - - setOperationAction(ISD::UREM, MVT::i32, Expand); - setOperationAction(ISD::UREM, MVT::i64, Expand); - setOperationAction(ISD::UDIVREM, MVT::i32, Expand); - setOperationAction(ISD::UDIVREM, MVT::i64, Expand); - - setOperationAction(ISD::SREM, MVT::i32, Expand); - setOperationAction(ISD::SREM, MVT::i64, Expand); - setOperationAction(ISD::SDIVREM, MVT::i32, Expand); - setOperationAction(ISD::SDIVREM, MVT::i64, Expand); - - setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand); - setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand); - setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand); - setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand); - - setOperationAction(ISD::CTPOP, MVT::i32, Expand); - setOperationAction(ISD::CTPOP, MVT::i64, Expand); - - // Legal floating-point operations. 
- setOperationAction(ISD::FABS, MVT::f32, Legal); - setOperationAction(ISD::FABS, MVT::f64, Legal); - - setOperationAction(ISD::FCEIL, MVT::f32, Legal); - setOperationAction(ISD::FCEIL, MVT::f64, Legal); - - setOperationAction(ISD::FFLOOR, MVT::f32, Legal); - setOperationAction(ISD::FFLOOR, MVT::f64, Legal); - - setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal); - setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal); - - setOperationAction(ISD::FNEG, MVT::f32, Legal); - setOperationAction(ISD::FNEG, MVT::f64, Legal); - - setOperationAction(ISD::FRINT, MVT::f32, Legal); - setOperationAction(ISD::FRINT, MVT::f64, Legal); - - setOperationAction(ISD::FSQRT, MVT::f32, Legal); - setOperationAction(ISD::FSQRT, MVT::f64, Legal); - - setOperationAction(ISD::FTRUNC, MVT::f32, Legal); - setOperationAction(ISD::FTRUNC, MVT::f64, Legal); - - setOperationAction(ISD::ConstantFP, MVT::f32, Legal); - setOperationAction(ISD::ConstantFP, MVT::f64, Legal); - setOperationAction(ISD::ConstantFP, MVT::f128, Legal); - - // Illegal floating-point operations. - setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand); - setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand); - - setOperationAction(ISD::FCOS, MVT::f32, Expand); - setOperationAction(ISD::FCOS, MVT::f64, Expand); - - setOperationAction(ISD::FEXP, MVT::f32, Expand); - setOperationAction(ISD::FEXP, MVT::f64, Expand); - - setOperationAction(ISD::FEXP2, MVT::f32, Expand); - setOperationAction(ISD::FEXP2, MVT::f64, Expand); - - setOperationAction(ISD::FLOG, MVT::f32, Expand); - setOperationAction(ISD::FLOG, MVT::f64, Expand); - - setOperationAction(ISD::FLOG2, MVT::f32, Expand); - setOperationAction(ISD::FLOG2, MVT::f64, Expand); - - setOperationAction(ISD::FLOG10, MVT::f32, Expand); - setOperationAction(ISD::FLOG10, MVT::f64, Expand); - - setOperationAction(ISD::FPOW, MVT::f32, Expand); - setOperationAction(ISD::FPOW, MVT::f64, Expand); - - setOperationAction(ISD::FPOWI, MVT::f32, Expand); - setOperationAction(ISD::FPOWI, MVT::f64, Expand); + setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom); + setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom); + setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom); setOperationAction(ISD::FREM, MVT::f32, Expand); setOperationAction(ISD::FREM, MVT::f64, Expand); + setOperationAction(ISD::FREM, MVT::f80, Expand); - setOperationAction(ISD::FSIN, MVT::f32, Expand); - setOperationAction(ISD::FSIN, MVT::f64, Expand); - - setOperationAction(ISD::FSINCOS, MVT::f32, Expand); - setOperationAction(ISD::FSINCOS, MVT::f64, Expand); + // Custom lowering hooks are needed for XOR + // to fold it into CSINC/CSINV. + setOperationAction(ISD::XOR, MVT::i32, Custom); + setOperationAction(ISD::XOR, MVT::i64, Custom); // Virtually no operation on f128 is legal, but LLVM can't expand them when // there's a valid register class, so we need custom operations in most cases. 
- setOperationAction(ISD::FABS, MVT::f128, Expand); - setOperationAction(ISD::FADD, MVT::f128, Custom); - setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand); - setOperationAction(ISD::FCOS, MVT::f128, Expand); - setOperationAction(ISD::FDIV, MVT::f128, Custom); - setOperationAction(ISD::FMA, MVT::f128, Expand); - setOperationAction(ISD::FMUL, MVT::f128, Custom); - setOperationAction(ISD::FNEG, MVT::f128, Expand); - setOperationAction(ISD::FP_EXTEND, MVT::f128, Expand); - setOperationAction(ISD::FP_ROUND, MVT::f128, Expand); - setOperationAction(ISD::FPOW, MVT::f128, Expand); - setOperationAction(ISD::FREM, MVT::f128, Expand); - setOperationAction(ISD::FRINT, MVT::f128, Expand); - setOperationAction(ISD::FSIN, MVT::f128, Expand); - setOperationAction(ISD::FSINCOS, MVT::f128, Expand); - setOperationAction(ISD::FSQRT, MVT::f128, Expand); - setOperationAction(ISD::FSUB, MVT::f128, Custom); - setOperationAction(ISD::FTRUNC, MVT::f128, Expand); - setOperationAction(ISD::SETCC, MVT::f128, Custom); - setOperationAction(ISD::BR_CC, MVT::f128, Custom); - setOperationAction(ISD::SELECT, MVT::f128, Expand); - setOperationAction(ISD::SELECT_CC, MVT::f128, Custom); - setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom); + setOperationAction(ISD::FABS, MVT::f128, Expand); + setOperationAction(ISD::FADD, MVT::f128, Custom); + setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand); + setOperationAction(ISD::FCOS, MVT::f128, Expand); + setOperationAction(ISD::FDIV, MVT::f128, Custom); + setOperationAction(ISD::FMA, MVT::f128, Expand); + setOperationAction(ISD::FMUL, MVT::f128, Custom); + setOperationAction(ISD::FNEG, MVT::f128, Expand); + setOperationAction(ISD::FPOW, MVT::f128, Expand); + setOperationAction(ISD::FREM, MVT::f128, Expand); + setOperationAction(ISD::FRINT, MVT::f128, Expand); + setOperationAction(ISD::FSIN, MVT::f128, Expand); + setOperationAction(ISD::FSINCOS, MVT::f128, Expand); + setOperationAction(ISD::FSQRT, MVT::f128, Expand); + setOperationAction(ISD::FSUB, MVT::f128, Custom); + setOperationAction(ISD::FTRUNC, MVT::f128, Expand); + setOperationAction(ISD::SETCC, MVT::f128, Custom); + setOperationAction(ISD::BR_CC, MVT::f128, Custom); + setOperationAction(ISD::SELECT, MVT::f128, Custom); + setOperationAction(ISD::SELECT_CC, MVT::f128, Custom); + setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom); // Lowering for many of the conversions is actually specified by the non-f128 // type. The LowerXXX function will be trivial when f128 isn't involved. @@ -266,623 +192,583 @@ AArch64TargetLowering::AArch64TargetLowering(AArch64TargetMachine &TM) setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom); setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom); setOperationAction(ISD::UINT_TO_FP, MVT::i128, Custom); - setOperationAction(ISD::FP_ROUND, MVT::f32, Custom); - setOperationAction(ISD::FP_ROUND, MVT::f64, Custom); - - // i128 shift operation support - setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom); - setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom); - setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom); + setOperationAction(ISD::FP_ROUND, MVT::f32, Custom); + setOperationAction(ISD::FP_ROUND, MVT::f64, Custom); - // This prevents LLVM trying to compress double constants into a floating - // constant-pool entry and trying to load from there. It's of doubtful benefit - // for A64: we'd need LDR followed by FCVT, I believe. 
- setLoadExtAction(ISD::EXTLOAD, MVT::f64, Expand); - setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand); - setLoadExtAction(ISD::EXTLOAD, MVT::f16, Expand); + // Variable arguments. + setOperationAction(ISD::VASTART, MVT::Other, Custom); + setOperationAction(ISD::VAARG, MVT::Other, Custom); + setOperationAction(ISD::VACOPY, MVT::Other, Custom); + setOperationAction(ISD::VAEND, MVT::Other, Expand); - setTruncStoreAction(MVT::f128, MVT::f64, Expand); - setTruncStoreAction(MVT::f128, MVT::f32, Expand); - setTruncStoreAction(MVT::f128, MVT::f16, Expand); - setTruncStoreAction(MVT::f64, MVT::f32, Expand); - setTruncStoreAction(MVT::f64, MVT::f16, Expand); - setTruncStoreAction(MVT::f32, MVT::f16, Expand); + // Variable-sized objects. + setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); + setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); + setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand); + // Exception handling. + // FIXME: These are guesses. Has this been defined yet? setExceptionPointerRegister(AArch64::X0); setExceptionSelectorRegister(AArch64::X1); - if (Subtarget->hasNEON()) { - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v8i8, Expand); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Expand); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Expand); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v1i64, Expand); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v16i8, Expand); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v8i16, Expand); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i32, Expand); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Expand); - - setOperationAction(ISD::BUILD_VECTOR, MVT::v1i8, Custom); - setOperationAction(ISD::BUILD_VECTOR, MVT::v8i8, Custom); - setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom); - setOperationAction(ISD::BUILD_VECTOR, MVT::v1i16, Custom); - setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom); - setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom); - setOperationAction(ISD::BUILD_VECTOR, MVT::v1i32, Custom); - setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Custom); - setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom); - setOperationAction(ISD::BUILD_VECTOR, MVT::v1i64, Custom); - setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom); - setOperationAction(ISD::BUILD_VECTOR, MVT::v2f32, Custom); - setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom); - setOperationAction(ISD::BUILD_VECTOR, MVT::v1f64, Custom); - setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom); - - setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i8, Custom); - setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom); - setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom); - setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i16, Custom); - setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i32, Custom); - setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i32, Custom); - setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v1i64, Custom); - setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom); - setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f32, Custom); - setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom); - setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v1f64, Custom); - setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom); - - setOperationAction(ISD::CONCAT_VECTORS, MVT::v2i32, Legal); - setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i8, Legal); - setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i16, Legal); - setOperationAction(ISD::CONCAT_VECTORS, 
MVT::v4i32, Legal); - setOperationAction(ISD::CONCAT_VECTORS, MVT::v2i64, Legal); - setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Legal); - setOperationAction(ISD::CONCAT_VECTORS, MVT::v2f64, Legal); - - setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i8, Custom); - setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i16, Custom); - setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i8, Custom); - setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i16, Custom); - setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Custom); - - setOperationAction(ISD::SETCC, MVT::v8i8, Custom); - setOperationAction(ISD::SETCC, MVT::v16i8, Custom); - setOperationAction(ISD::SETCC, MVT::v4i16, Custom); - setOperationAction(ISD::SETCC, MVT::v8i16, Custom); - setOperationAction(ISD::SETCC, MVT::v2i32, Custom); - setOperationAction(ISD::SETCC, MVT::v4i32, Custom); - setOperationAction(ISD::SETCC, MVT::v1i64, Custom); - setOperationAction(ISD::SETCC, MVT::v2i64, Custom); - setOperationAction(ISD::SETCC, MVT::v2f32, Custom); - setOperationAction(ISD::SETCC, MVT::v4f32, Custom); - setOperationAction(ISD::SETCC, MVT::v1f64, Custom); - setOperationAction(ISD::SETCC, MVT::v2f64, Custom); - - setOperationAction(ISD::FFLOOR, MVT::v2f32, Legal); - setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal); - setOperationAction(ISD::FFLOOR, MVT::v1f64, Legal); - setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal); - - setOperationAction(ISD::FCEIL, MVT::v2f32, Legal); - setOperationAction(ISD::FCEIL, MVT::v4f32, Legal); - setOperationAction(ISD::FCEIL, MVT::v1f64, Legal); - setOperationAction(ISD::FCEIL, MVT::v2f64, Legal); - - setOperationAction(ISD::FTRUNC, MVT::v2f32, Legal); - setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal); - setOperationAction(ISD::FTRUNC, MVT::v1f64, Legal); - setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal); - - setOperationAction(ISD::FRINT, MVT::v2f32, Legal); - setOperationAction(ISD::FRINT, MVT::v4f32, Legal); - setOperationAction(ISD::FRINT, MVT::v1f64, Legal); - setOperationAction(ISD::FRINT, MVT::v2f64, Legal); - - setOperationAction(ISD::FNEARBYINT, MVT::v2f32, Legal); - setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal); - setOperationAction(ISD::FNEARBYINT, MVT::v1f64, Legal); - setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal); - - setOperationAction(ISD::FROUND, MVT::v2f32, Legal); - setOperationAction(ISD::FROUND, MVT::v4f32, Legal); - setOperationAction(ISD::FROUND, MVT::v1f64, Legal); - setOperationAction(ISD::FROUND, MVT::v2f64, Legal); - - setOperationAction(ISD::SINT_TO_FP, MVT::v1i8, Custom); - setOperationAction(ISD::SINT_TO_FP, MVT::v1i16, Custom); - setOperationAction(ISD::SINT_TO_FP, MVT::v1i32, Custom); - setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom); - setOperationAction(ISD::SINT_TO_FP, MVT::v2i32, Custom); - setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Custom); + // Constant pool entries + setOperationAction(ISD::ConstantPool, MVT::i64, Custom); - setOperationAction(ISD::UINT_TO_FP, MVT::v1i8, Custom); - setOperationAction(ISD::UINT_TO_FP, MVT::v1i16, Custom); - setOperationAction(ISD::UINT_TO_FP, MVT::v1i32, Custom); - setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom); - setOperationAction(ISD::UINT_TO_FP, MVT::v2i32, Custom); - setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Custom); + // BlockAddress + setOperationAction(ISD::BlockAddress, MVT::i64, Custom); - setOperationAction(ISD::FP_TO_SINT, MVT::v1i8, Custom); - setOperationAction(ISD::FP_TO_SINT, MVT::v1i16, Custom); - setOperationAction(ISD::FP_TO_SINT, MVT::v1i32, Custom); - 
setOperationAction(ISD::FP_TO_SINT, MVT::v4i16, Custom); - setOperationAction(ISD::FP_TO_SINT, MVT::v2i32, Custom); - setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Custom); - - setOperationAction(ISD::FP_TO_UINT, MVT::v1i8, Custom); - setOperationAction(ISD::FP_TO_UINT, MVT::v1i16, Custom); - setOperationAction(ISD::FP_TO_UINT, MVT::v1i32, Custom); - setOperationAction(ISD::FP_TO_UINT, MVT::v4i16, Custom); - setOperationAction(ISD::FP_TO_UINT, MVT::v2i32, Custom); - setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Custom); - - // Neon does not support vector divide/remainder operations except - // floating-point divide. - setOperationAction(ISD::SDIV, MVT::v1i8, Expand); - setOperationAction(ISD::SDIV, MVT::v8i8, Expand); - setOperationAction(ISD::SDIV, MVT::v16i8, Expand); - setOperationAction(ISD::SDIV, MVT::v1i16, Expand); - setOperationAction(ISD::SDIV, MVT::v4i16, Expand); - setOperationAction(ISD::SDIV, MVT::v8i16, Expand); - setOperationAction(ISD::SDIV, MVT::v1i32, Expand); - setOperationAction(ISD::SDIV, MVT::v2i32, Expand); - setOperationAction(ISD::SDIV, MVT::v4i32, Expand); - setOperationAction(ISD::SDIV, MVT::v1i64, Expand); - setOperationAction(ISD::SDIV, MVT::v2i64, Expand); - - setOperationAction(ISD::UDIV, MVT::v1i8, Expand); - setOperationAction(ISD::UDIV, MVT::v8i8, Expand); - setOperationAction(ISD::UDIV, MVT::v16i8, Expand); - setOperationAction(ISD::UDIV, MVT::v1i16, Expand); - setOperationAction(ISD::UDIV, MVT::v4i16, Expand); - setOperationAction(ISD::UDIV, MVT::v8i16, Expand); - setOperationAction(ISD::UDIV, MVT::v1i32, Expand); - setOperationAction(ISD::UDIV, MVT::v2i32, Expand); - setOperationAction(ISD::UDIV, MVT::v4i32, Expand); - setOperationAction(ISD::UDIV, MVT::v1i64, Expand); - setOperationAction(ISD::UDIV, MVT::v2i64, Expand); - - setOperationAction(ISD::SREM, MVT::v1i8, Expand); - setOperationAction(ISD::SREM, MVT::v8i8, Expand); - setOperationAction(ISD::SREM, MVT::v16i8, Expand); - setOperationAction(ISD::SREM, MVT::v1i16, Expand); - setOperationAction(ISD::SREM, MVT::v4i16, Expand); - setOperationAction(ISD::SREM, MVT::v8i16, Expand); - setOperationAction(ISD::SREM, MVT::v1i32, Expand); - setOperationAction(ISD::SREM, MVT::v2i32, Expand); - setOperationAction(ISD::SREM, MVT::v4i32, Expand); - setOperationAction(ISD::SREM, MVT::v1i64, Expand); - setOperationAction(ISD::SREM, MVT::v2i64, Expand); - - setOperationAction(ISD::UREM, MVT::v1i8, Expand); - setOperationAction(ISD::UREM, MVT::v8i8, Expand); - setOperationAction(ISD::UREM, MVT::v16i8, Expand); - setOperationAction(ISD::UREM, MVT::v1i16, Expand); - setOperationAction(ISD::UREM, MVT::v4i16, Expand); - setOperationAction(ISD::UREM, MVT::v8i16, Expand); - setOperationAction(ISD::UREM, MVT::v1i32, Expand); - setOperationAction(ISD::UREM, MVT::v2i32, Expand); - setOperationAction(ISD::UREM, MVT::v4i32, Expand); - setOperationAction(ISD::UREM, MVT::v1i64, Expand); - setOperationAction(ISD::UREM, MVT::v2i64, Expand); - - setOperationAction(ISD::FREM, MVT::v2f32, Expand); - setOperationAction(ISD::FREM, MVT::v4f32, Expand); - setOperationAction(ISD::FREM, MVT::v1f64, Expand); - setOperationAction(ISD::FREM, MVT::v2f64, Expand); - - setOperationAction(ISD::SELECT, MVT::v8i8, Expand); - setOperationAction(ISD::SELECT, MVT::v16i8, Expand); - setOperationAction(ISD::SELECT, MVT::v4i16, Expand); - setOperationAction(ISD::SELECT, MVT::v8i16, Expand); - setOperationAction(ISD::SELECT, MVT::v2i32, Expand); - setOperationAction(ISD::SELECT, MVT::v4i32, Expand); - setOperationAction(ISD::SELECT, MVT::v1i64, 
Expand); - setOperationAction(ISD::SELECT, MVT::v2i64, Expand); - setOperationAction(ISD::SELECT, MVT::v2f32, Expand); - setOperationAction(ISD::SELECT, MVT::v4f32, Expand); - setOperationAction(ISD::SELECT, MVT::v1f64, Expand); - setOperationAction(ISD::SELECT, MVT::v2f64, Expand); - - setOperationAction(ISD::SELECT_CC, MVT::v8i8, Custom); - setOperationAction(ISD::SELECT_CC, MVT::v16i8, Custom); - setOperationAction(ISD::SELECT_CC, MVT::v4i16, Custom); - setOperationAction(ISD::SELECT_CC, MVT::v8i16, Custom); - setOperationAction(ISD::SELECT_CC, MVT::v2i32, Custom); - setOperationAction(ISD::SELECT_CC, MVT::v4i32, Custom); - setOperationAction(ISD::SELECT_CC, MVT::v1i64, Custom); - setOperationAction(ISD::SELECT_CC, MVT::v2i64, Custom); - setOperationAction(ISD::SELECT_CC, MVT::v2f32, Custom); - setOperationAction(ISD::SELECT_CC, MVT::v4f32, Custom); - setOperationAction(ISD::SELECT_CC, MVT::v1f64, Custom); - setOperationAction(ISD::SELECT_CC, MVT::v2f64, Custom); - - // Vector ExtLoad and TruncStore are expanded. - for (unsigned I = MVT::FIRST_VECTOR_VALUETYPE; - I <= MVT::LAST_VECTOR_VALUETYPE; ++I) { - MVT VT = (MVT::SimpleValueType) I; - setLoadExtAction(ISD::SEXTLOAD, VT, Expand); - setLoadExtAction(ISD::ZEXTLOAD, VT, Expand); - setLoadExtAction(ISD::EXTLOAD, VT, Expand); - for (unsigned II = MVT::FIRST_VECTOR_VALUETYPE; - II <= MVT::LAST_VECTOR_VALUETYPE; ++II) { - MVT VT1 = (MVT::SimpleValueType) II; - // A TruncStore has two vector types of the same number of elements - // and different element sizes. - if (VT.getVectorNumElements() == VT1.getVectorNumElements() && - VT.getVectorElementType().getSizeInBits() - > VT1.getVectorElementType().getSizeInBits()) - setTruncStoreAction(VT, VT1, Expand); - } - } + // Add/Sub overflow ops with MVT::Glues are lowered to NZCV dependences. + setOperationAction(ISD::ADDC, MVT::i32, Custom); + setOperationAction(ISD::ADDE, MVT::i32, Custom); + setOperationAction(ISD::SUBC, MVT::i32, Custom); + setOperationAction(ISD::SUBE, MVT::i32, Custom); + setOperationAction(ISD::ADDC, MVT::i64, Custom); + setOperationAction(ISD::ADDE, MVT::i64, Custom); + setOperationAction(ISD::SUBC, MVT::i64, Custom); + setOperationAction(ISD::SUBE, MVT::i64, Custom); + + // AArch64 lacks both left-rotate and popcount instructions. + setOperationAction(ISD::ROTL, MVT::i32, Expand); + setOperationAction(ISD::ROTL, MVT::i64, Expand); - // There is no v1i64/v2i64 multiply, expand v1i64/v2i64 to GPR i64 multiply. - // FIXME: For a v2i64 multiply, we copy VPR to GPR and do 2 i64 multiplies, - // and then copy back to VPR. This solution may be optimized by Following 3 - // NEON instructions: - // pmull v2.1q, v0.1d, v1.1d - // pmull2 v3.1q, v0.2d, v1.2d - // ins v2.d[1], v3.d[0] - // As currently we can't verify the correctness of such assumption, we can - // do such optimization in the future. - setOperationAction(ISD::MUL, MVT::v1i64, Expand); - setOperationAction(ISD::MUL, MVT::v2i64, Expand); + // AArch64 doesn't have {U|S}MUL_LOHI. 
+ setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand); + setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand); - setOperationAction(ISD::FCOS, MVT::v2f64, Expand); - setOperationAction(ISD::FCOS, MVT::v4f32, Expand); - setOperationAction(ISD::FCOS, MVT::v2f32, Expand); - setOperationAction(ISD::FSIN, MVT::v2f64, Expand); - setOperationAction(ISD::FSIN, MVT::v4f32, Expand); - setOperationAction(ISD::FSIN, MVT::v2f32, Expand); - setOperationAction(ISD::FPOW, MVT::v2f64, Expand); - setOperationAction(ISD::FPOW, MVT::v4f32, Expand); - setOperationAction(ISD::FPOW, MVT::v2f32, Expand); - } - setTargetDAGCombine(ISD::SETCC); - setTargetDAGCombine(ISD::SIGN_EXTEND); - setTargetDAGCombine(ISD::VSELECT); -} + // Expand the undefined-at-zero variants to cttz/ctlz to their defined-at-zero + // counterparts, which AArch64 supports directly. + setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand); + setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand); + setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand); + setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand); -EVT AArch64TargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const { - // It's reasonably important that this value matches the "natural" legal - // promotion from i1 for scalar types. Otherwise LegalizeTypes can get itself - // in a twist (e.g. inserting an any_extend which then becomes i64 -> i64). - if (!VT.isVector()) return MVT::i32; - return VT.changeVectorElementTypeToInteger(); -} + setOperationAction(ISD::CTPOP, MVT::i32, Custom); + setOperationAction(ISD::CTPOP, MVT::i64, Custom); -static void getExclusiveOperation(unsigned Size, AtomicOrdering Ord, - unsigned &LdrOpc, - unsigned &StrOpc) { - static const unsigned LoadBares[] = {AArch64::LDXR_byte, AArch64::LDXR_hword, - AArch64::LDXR_word, AArch64::LDXR_dword}; - static const unsigned LoadAcqs[] = {AArch64::LDAXR_byte, AArch64::LDAXR_hword, - AArch64::LDAXR_word, AArch64::LDAXR_dword}; - static const unsigned StoreBares[] = {AArch64::STXR_byte, AArch64::STXR_hword, - AArch64::STXR_word, AArch64::STXR_dword}; - static const unsigned StoreRels[] = {AArch64::STLXR_byte,AArch64::STLXR_hword, - AArch64::STLXR_word, AArch64::STLXR_dword}; - - const unsigned *LoadOps, *StoreOps; - if (Ord == Acquire || Ord == AcquireRelease || Ord == SequentiallyConsistent) - LoadOps = LoadAcqs; - else - LoadOps = LoadBares; + setOperationAction(ISD::SDIVREM, MVT::i32, Expand); + setOperationAction(ISD::SDIVREM, MVT::i64, Expand); + setOperationAction(ISD::SREM, MVT::i32, Expand); + setOperationAction(ISD::SREM, MVT::i64, Expand); + setOperationAction(ISD::UDIVREM, MVT::i32, Expand); + setOperationAction(ISD::UDIVREM, MVT::i64, Expand); + setOperationAction(ISD::UREM, MVT::i32, Expand); + setOperationAction(ISD::UREM, MVT::i64, Expand); - if (Ord == Release || Ord == AcquireRelease || Ord == SequentiallyConsistent) - StoreOps = StoreRels; - else - StoreOps = StoreBares; + // Custom lower Add/Sub/Mul with overflow. 
+ setOperationAction(ISD::SADDO, MVT::i32, Custom); + setOperationAction(ISD::SADDO, MVT::i64, Custom); + setOperationAction(ISD::UADDO, MVT::i32, Custom); + setOperationAction(ISD::UADDO, MVT::i64, Custom); + setOperationAction(ISD::SSUBO, MVT::i32, Custom); + setOperationAction(ISD::SSUBO, MVT::i64, Custom); + setOperationAction(ISD::USUBO, MVT::i32, Custom); + setOperationAction(ISD::USUBO, MVT::i64, Custom); + setOperationAction(ISD::SMULO, MVT::i32, Custom); + setOperationAction(ISD::SMULO, MVT::i64, Custom); + setOperationAction(ISD::UMULO, MVT::i32, Custom); + setOperationAction(ISD::UMULO, MVT::i64, Custom); - assert(isPowerOf2_32(Size) && Size <= 8 && - "unsupported size for atomic binary op!"); + setOperationAction(ISD::FSIN, MVT::f32, Expand); + setOperationAction(ISD::FSIN, MVT::f64, Expand); + setOperationAction(ISD::FCOS, MVT::f32, Expand); + setOperationAction(ISD::FCOS, MVT::f64, Expand); + setOperationAction(ISD::FPOW, MVT::f32, Expand); + setOperationAction(ISD::FPOW, MVT::f64, Expand); + setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom); + setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom); + + // AArch64 has implementations of a lot of rounding-like FP operations. + static MVT RoundingTypes[] = { MVT::f32, MVT::f64}; + for (unsigned I = 0; I < array_lengthof(RoundingTypes); ++I) { + MVT Ty = RoundingTypes[I]; + setOperationAction(ISD::FFLOOR, Ty, Legal); + setOperationAction(ISD::FNEARBYINT, Ty, Legal); + setOperationAction(ISD::FCEIL, Ty, Legal); + setOperationAction(ISD::FRINT, Ty, Legal); + setOperationAction(ISD::FTRUNC, Ty, Legal); + setOperationAction(ISD::FROUND, Ty, Legal); + } - LdrOpc = LoadOps[Log2_32(Size)]; - StrOpc = StoreOps[Log2_32(Size)]; -} + setOperationAction(ISD::PREFETCH, MVT::Other, Custom); -// FIXME: AArch64::DTripleRegClass and AArch64::QTripleRegClass don't really -// have value type mapped, and they are both being defined as MVT::untyped. -// Without knowing the MVT type, MachineLICM::getRegisterClassIDAndCost -// would fail to figure out the register pressure correctly. -std::pair<const TargetRegisterClass*, uint8_t> -AArch64TargetLowering::findRepresentativeClass(MVT VT) const{ - const TargetRegisterClass *RRC = 0; - uint8_t Cost = 1; - switch (VT.SimpleTy) { - default: - return TargetLowering::findRepresentativeClass(VT); - case MVT::v4i64: - RRC = &AArch64::QPairRegClass; - Cost = 2; - break; - case MVT::v8i64: - RRC = &AArch64::QQuadRegClass; - Cost = 4; - break; + if (Subtarget->isTargetMachO()) { + // For iOS, we don't want to the normal expansion of a libcall to + // sincos. We want to issue a libcall to __sincos_stret to avoid memory + // traffic. + setOperationAction(ISD::FSINCOS, MVT::f64, Custom); + setOperationAction(ISD::FSINCOS, MVT::f32, Custom); + } else { + setOperationAction(ISD::FSINCOS, MVT::f64, Expand); + setOperationAction(ISD::FSINCOS, MVT::f32, Expand); } - return std::make_pair(RRC, Cost); -} -MachineBasicBlock * -AArch64TargetLowering::emitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB, - unsigned Size, - unsigned BinOpcode) const { - // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. - const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); + // AArch64 does not have floating-point extending loads, i1 sign-extending + // load, floating-point truncating stores, or v2i32->v2i16 truncating store. 
+ setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand); + setLoadExtAction(ISD::EXTLOAD, MVT::f64, Expand); + setLoadExtAction(ISD::EXTLOAD, MVT::f80, Expand); + setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Expand); + setTruncStoreAction(MVT::f32, MVT::f16, Expand); + setTruncStoreAction(MVT::f64, MVT::f32, Expand); + setTruncStoreAction(MVT::f64, MVT::f16, Expand); + setTruncStoreAction(MVT::f128, MVT::f80, Expand); + setTruncStoreAction(MVT::f128, MVT::f64, Expand); + setTruncStoreAction(MVT::f128, MVT::f32, Expand); + setTruncStoreAction(MVT::f128, MVT::f16, Expand); + // Indexed loads and stores are supported. + for (unsigned im = (unsigned)ISD::PRE_INC; + im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) { + setIndexedLoadAction(im, MVT::i8, Legal); + setIndexedLoadAction(im, MVT::i16, Legal); + setIndexedLoadAction(im, MVT::i32, Legal); + setIndexedLoadAction(im, MVT::i64, Legal); + setIndexedLoadAction(im, MVT::f64, Legal); + setIndexedLoadAction(im, MVT::f32, Legal); + setIndexedStoreAction(im, MVT::i8, Legal); + setIndexedStoreAction(im, MVT::i16, Legal); + setIndexedStoreAction(im, MVT::i32, Legal); + setIndexedStoreAction(im, MVT::i64, Legal); + setIndexedStoreAction(im, MVT::f64, Legal); + setIndexedStoreAction(im, MVT::f32, Legal); + } - const BasicBlock *LLVM_BB = BB->getBasicBlock(); - MachineFunction *MF = BB->getParent(); - MachineFunction::iterator It = BB; - ++It; + // Trap. + setOperationAction(ISD::TRAP, MVT::Other, Legal); - unsigned dest = MI->getOperand(0).getReg(); - unsigned ptr = MI->getOperand(1).getReg(); - unsigned incr = MI->getOperand(2).getReg(); - AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(3).getImm()); - DebugLoc dl = MI->getDebugLoc(); - - MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); - - unsigned ldrOpc, strOpc; - getExclusiveOperation(Size, Ord, ldrOpc, strOpc); - - MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB); - MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); - MF->insert(It, loopMBB); - MF->insert(It, exitMBB); - - // Transfer the remainder of BB and its successor edges to exitMBB. - exitMBB->splice(exitMBB->begin(), BB, - std::next(MachineBasicBlock::iterator(MI)), BB->end()); - exitMBB->transferSuccessorsAndUpdatePHIs(BB); - - const TargetRegisterClass *TRC - = Size == 8 ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass; - unsigned scratch = (!BinOpcode) ? incr : MRI.createVirtualRegister(TRC); - - // thisMBB: - // ... - // fallthrough --> loopMBB - BB->addSuccessor(loopMBB); - - // loopMBB: - // ldxr dest, ptr - // <binop> scratch, dest, incr - // stxr stxr_status, scratch, ptr - // cbnz stxr_status, loopMBB - // fallthrough --> exitMBB - BB = loopMBB; - BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr); - if (BinOpcode) { - // All arithmetic operations we'll be creating are designed to take an extra - // shift or extend operand, which we can conveniently set to zero. - - // Operand order needs to go the other way for NAND. - if (BinOpcode == AArch64::BICwww_lsl || BinOpcode == AArch64::BICxxx_lsl) - BuildMI(BB, dl, TII->get(BinOpcode), scratch) - .addReg(incr).addReg(dest).addImm(0); - else - BuildMI(BB, dl, TII->get(BinOpcode), scratch) - .addReg(dest).addReg(incr).addImm(0); - } + // We combine OR nodes for bitfield operations. 
+ setTargetDAGCombine(ISD::OR); - // From the stxr, the register is GPR32; from the cmp it's GPR32wsp - unsigned stxr_status = MRI.createVirtualRegister(&AArch64::GPR32RegClass); - MRI.constrainRegClass(stxr_status, &AArch64::GPR32wspRegClass); + // Vector add and sub nodes may conceal a high-half opportunity. + // Also, try to fold ADD into CSINC/CSINV.. + setTargetDAGCombine(ISD::ADD); + setTargetDAGCombine(ISD::SUB); - BuildMI(BB, dl, TII->get(strOpc), stxr_status).addReg(scratch).addReg(ptr); - BuildMI(BB, dl, TII->get(AArch64::CBNZw)) - .addReg(stxr_status).addMBB(loopMBB); + setTargetDAGCombine(ISD::XOR); + setTargetDAGCombine(ISD::SINT_TO_FP); + setTargetDAGCombine(ISD::UINT_TO_FP); - BB->addSuccessor(loopMBB); - BB->addSuccessor(exitMBB); + setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN); - // exitMBB: - // ... - BB = exitMBB; + setTargetDAGCombine(ISD::ANY_EXTEND); + setTargetDAGCombine(ISD::ZERO_EXTEND); + setTargetDAGCombine(ISD::SIGN_EXTEND); + setTargetDAGCombine(ISD::BITCAST); + setTargetDAGCombine(ISD::CONCAT_VECTORS); + setTargetDAGCombine(ISD::STORE); - MI->eraseFromParent(); // The instruction is gone now. + setTargetDAGCombine(ISD::MUL); - return BB; -} + setTargetDAGCombine(ISD::SELECT); + setTargetDAGCombine(ISD::VSELECT); -MachineBasicBlock * -AArch64TargetLowering::emitAtomicBinaryMinMax(MachineInstr *MI, - MachineBasicBlock *BB, - unsigned Size, - unsigned CmpOp, - A64CC::CondCodes Cond) const { - const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); + setTargetDAGCombine(ISD::INTRINSIC_VOID); + setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN); + setTargetDAGCombine(ISD::INSERT_VECTOR_ELT); - const BasicBlock *LLVM_BB = BB->getBasicBlock(); - MachineFunction *MF = BB->getParent(); - MachineFunction::iterator It = BB; - ++It; + MaxStoresPerMemset = MaxStoresPerMemsetOptSize = 8; + MaxStoresPerMemcpy = MaxStoresPerMemcpyOptSize = 4; + MaxStoresPerMemmove = MaxStoresPerMemmoveOptSize = 4; - unsigned dest = MI->getOperand(0).getReg(); - unsigned ptr = MI->getOperand(1).getReg(); - unsigned incr = MI->getOperand(2).getReg(); - AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(3).getImm()); + setStackPointerRegisterToSaveRestore(AArch64::SP); - unsigned oldval = dest; - DebugLoc dl = MI->getDebugLoc(); + setSchedulingPreference(Sched::Hybrid); - MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); - const TargetRegisterClass *TRC, *TRCsp; - if (Size == 8) { - TRC = &AArch64::GPR64RegClass; - TRCsp = &AArch64::GPR64xspRegClass; - } else { - TRC = &AArch64::GPR32RegClass; - TRCsp = &AArch64::GPR32wspRegClass; - } + // Enable TBZ/TBNZ + MaskAndBranchFoldingIsLegal = true; - unsigned ldrOpc, strOpc; - getExclusiveOperation(Size, Ord, ldrOpc, strOpc); + setMinFunctionAlignment(2); - MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB); - MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); - MF->insert(It, loopMBB); - MF->insert(It, exitMBB); + RequireStrictAlign = (Align == StrictAlign); - // Transfer the remainder of BB and its successor edges to exitMBB. 
- exitMBB->splice(exitMBB->begin(), BB, - std::next(MachineBasicBlock::iterator(MI)), BB->end()); - exitMBB->transferSuccessorsAndUpdatePHIs(BB); + setHasExtractBitsInsn(true); - unsigned scratch = MRI.createVirtualRegister(TRC); - MRI.constrainRegClass(scratch, TRCsp); + if (Subtarget->hasNEON()) { + // FIXME: v1f64 shouldn't be legal if we can avoid it, because it leads to + // silliness like this: + setOperationAction(ISD::FABS, MVT::v1f64, Expand); + setOperationAction(ISD::FADD, MVT::v1f64, Expand); + setOperationAction(ISD::FCEIL, MVT::v1f64, Expand); + setOperationAction(ISD::FCOPYSIGN, MVT::v1f64, Expand); + setOperationAction(ISD::FCOS, MVT::v1f64, Expand); + setOperationAction(ISD::FDIV, MVT::v1f64, Expand); + setOperationAction(ISD::FFLOOR, MVT::v1f64, Expand); + setOperationAction(ISD::FMA, MVT::v1f64, Expand); + setOperationAction(ISD::FMUL, MVT::v1f64, Expand); + setOperationAction(ISD::FNEARBYINT, MVT::v1f64, Expand); + setOperationAction(ISD::FNEG, MVT::v1f64, Expand); + setOperationAction(ISD::FPOW, MVT::v1f64, Expand); + setOperationAction(ISD::FREM, MVT::v1f64, Expand); + setOperationAction(ISD::FROUND, MVT::v1f64, Expand); + setOperationAction(ISD::FRINT, MVT::v1f64, Expand); + setOperationAction(ISD::FSIN, MVT::v1f64, Expand); + setOperationAction(ISD::FSINCOS, MVT::v1f64, Expand); + setOperationAction(ISD::FSQRT, MVT::v1f64, Expand); + setOperationAction(ISD::FSUB, MVT::v1f64, Expand); + setOperationAction(ISD::FTRUNC, MVT::v1f64, Expand); + setOperationAction(ISD::SETCC, MVT::v1f64, Expand); + setOperationAction(ISD::BR_CC, MVT::v1f64, Expand); + setOperationAction(ISD::SELECT, MVT::v1f64, Expand); + setOperationAction(ISD::SELECT_CC, MVT::v1f64, Expand); + setOperationAction(ISD::FP_EXTEND, MVT::v1f64, Expand); - // thisMBB: - // ... - // fallthrough --> loopMBB - BB->addSuccessor(loopMBB); + setOperationAction(ISD::FP_TO_SINT, MVT::v1i64, Expand); + setOperationAction(ISD::FP_TO_UINT, MVT::v1i64, Expand); + setOperationAction(ISD::SINT_TO_FP, MVT::v1i64, Expand); + setOperationAction(ISD::UINT_TO_FP, MVT::v1i64, Expand); + setOperationAction(ISD::FP_ROUND, MVT::v1f64, Expand); - // loopMBB: - // ldxr dest, ptr - // cmp incr, dest (, sign extend if necessary) - // csel scratch, dest, incr, cond - // stxr stxr_status, scratch, ptr - // cbnz stxr_status, loopMBB - // fallthrough --> exitMBB - BB = loopMBB; - BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr); + setOperationAction(ISD::MUL, MVT::v1i64, Expand); - // Build compare and cmov instructions. - MRI.constrainRegClass(incr, TRCsp); - BuildMI(BB, dl, TII->get(CmpOp)) - .addReg(incr).addReg(oldval).addImm(0); + // AArch64 doesn't have a direct vector ->f32 conversion instructions for + // elements smaller than i32, so promote the input to i32 first. + setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Promote); + setOperationAction(ISD::SINT_TO_FP, MVT::v4i8, Promote); + setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Promote); + setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Promote); + // Similarly, there is no direct i32 -> f64 vector conversion instruction. + setOperationAction(ISD::SINT_TO_FP, MVT::v2i32, Custom); + setOperationAction(ISD::UINT_TO_FP, MVT::v2i32, Custom); + setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Custom); + setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Custom); - BuildMI(BB, dl, TII->get(Size == 8 ? 
AArch64::CSELxxxc : AArch64::CSELwwwc), - scratch) - .addReg(oldval).addReg(incr).addImm(Cond); + // AArch64 doesn't have MUL.2d: + setOperationAction(ISD::MUL, MVT::v2i64, Expand); + setOperationAction(ISD::ANY_EXTEND, MVT::v4i32, Legal); + setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand); + // Likewise, narrowing and extending vector loads/stores aren't handled + // directly. + for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE; + VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) { + + setOperationAction(ISD::SIGN_EXTEND_INREG, (MVT::SimpleValueType)VT, + Expand); + + setOperationAction(ISD::MULHS, (MVT::SimpleValueType)VT, Expand); + setOperationAction(ISD::SMUL_LOHI, (MVT::SimpleValueType)VT, Expand); + setOperationAction(ISD::MULHU, (MVT::SimpleValueType)VT, Expand); + setOperationAction(ISD::UMUL_LOHI, (MVT::SimpleValueType)VT, Expand); + + setOperationAction(ISD::BSWAP, (MVT::SimpleValueType)VT, Expand); + + for (unsigned InnerVT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE; + InnerVT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++InnerVT) + setTruncStoreAction((MVT::SimpleValueType)VT, + (MVT::SimpleValueType)InnerVT, Expand); + setLoadExtAction(ISD::SEXTLOAD, (MVT::SimpleValueType)VT, Expand); + setLoadExtAction(ISD::ZEXTLOAD, (MVT::SimpleValueType)VT, Expand); + setLoadExtAction(ISD::EXTLOAD, (MVT::SimpleValueType)VT, Expand); + } - unsigned stxr_status = MRI.createVirtualRegister(&AArch64::GPR32RegClass); - MRI.constrainRegClass(stxr_status, &AArch64::GPR32wspRegClass); + // AArch64 has implementations of a lot of rounding-like FP operations. + static MVT RoundingVecTypes[] = {MVT::v2f32, MVT::v4f32, MVT::v2f64 }; + for (unsigned I = 0; I < array_lengthof(RoundingVecTypes); ++I) { + MVT Ty = RoundingVecTypes[I]; + setOperationAction(ISD::FFLOOR, Ty, Legal); + setOperationAction(ISD::FNEARBYINT, Ty, Legal); + setOperationAction(ISD::FCEIL, Ty, Legal); + setOperationAction(ISD::FRINT, Ty, Legal); + setOperationAction(ISD::FTRUNC, Ty, Legal); + setOperationAction(ISD::FROUND, Ty, Legal); + } + } +} - BuildMI(BB, dl, TII->get(strOpc), stxr_status) - .addReg(scratch).addReg(ptr); - BuildMI(BB, dl, TII->get(AArch64::CBNZw)) - .addReg(stxr_status).addMBB(loopMBB); +void AArch64TargetLowering::addTypeForNEON(EVT VT, EVT PromotedBitwiseVT) { + if (VT == MVT::v2f32) { + setOperationAction(ISD::LOAD, VT.getSimpleVT(), Promote); + AddPromotedToType(ISD::LOAD, VT.getSimpleVT(), MVT::v2i32); - BB->addSuccessor(loopMBB); - BB->addSuccessor(exitMBB); + setOperationAction(ISD::STORE, VT.getSimpleVT(), Promote); + AddPromotedToType(ISD::STORE, VT.getSimpleVT(), MVT::v2i32); + } else if (VT == MVT::v2f64 || VT == MVT::v4f32) { + setOperationAction(ISD::LOAD, VT.getSimpleVT(), Promote); + AddPromotedToType(ISD::LOAD, VT.getSimpleVT(), MVT::v2i64); - // exitMBB: - // ... - BB = exitMBB; + setOperationAction(ISD::STORE, VT.getSimpleVT(), Promote); + AddPromotedToType(ISD::STORE, VT.getSimpleVT(), MVT::v2i64); + } - MI->eraseFromParent(); // The instruction is gone now. + // Mark vector float intrinsics as expand. 
+ if (VT == MVT::v2f32 || VT == MVT::v4f32 || VT == MVT::v2f64) { + setOperationAction(ISD::FSIN, VT.getSimpleVT(), Expand); + setOperationAction(ISD::FCOS, VT.getSimpleVT(), Expand); + setOperationAction(ISD::FPOWI, VT.getSimpleVT(), Expand); + setOperationAction(ISD::FPOW, VT.getSimpleVT(), Expand); + setOperationAction(ISD::FLOG, VT.getSimpleVT(), Expand); + setOperationAction(ISD::FLOG2, VT.getSimpleVT(), Expand); + setOperationAction(ISD::FLOG10, VT.getSimpleVT(), Expand); + setOperationAction(ISD::FEXP, VT.getSimpleVT(), Expand); + setOperationAction(ISD::FEXP2, VT.getSimpleVT(), Expand); + } - return BB; + setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT.getSimpleVT(), Custom); + setOperationAction(ISD::INSERT_VECTOR_ELT, VT.getSimpleVT(), Custom); + setOperationAction(ISD::BUILD_VECTOR, VT.getSimpleVT(), Custom); + setOperationAction(ISD::VECTOR_SHUFFLE, VT.getSimpleVT(), Custom); + setOperationAction(ISD::EXTRACT_SUBVECTOR, VT.getSimpleVT(), Custom); + setOperationAction(ISD::SRA, VT.getSimpleVT(), Custom); + setOperationAction(ISD::SRL, VT.getSimpleVT(), Custom); + setOperationAction(ISD::SHL, VT.getSimpleVT(), Custom); + setOperationAction(ISD::AND, VT.getSimpleVT(), Custom); + setOperationAction(ISD::OR, VT.getSimpleVT(), Custom); + setOperationAction(ISD::SETCC, VT.getSimpleVT(), Custom); + setOperationAction(ISD::CONCAT_VECTORS, VT.getSimpleVT(), Legal); + + setOperationAction(ISD::SELECT, VT.getSimpleVT(), Expand); + setOperationAction(ISD::SELECT_CC, VT.getSimpleVT(), Expand); + setOperationAction(ISD::VSELECT, VT.getSimpleVT(), Expand); + setLoadExtAction(ISD::EXTLOAD, VT.getSimpleVT(), Expand); + + // CNT supports only B element sizes. + if (VT != MVT::v8i8 && VT != MVT::v16i8) + setOperationAction(ISD::CTPOP, VT.getSimpleVT(), Expand); + + setOperationAction(ISD::UDIV, VT.getSimpleVT(), Expand); + setOperationAction(ISD::SDIV, VT.getSimpleVT(), Expand); + setOperationAction(ISD::UREM, VT.getSimpleVT(), Expand); + setOperationAction(ISD::SREM, VT.getSimpleVT(), Expand); + setOperationAction(ISD::FREM, VT.getSimpleVT(), Expand); + + setOperationAction(ISD::FP_TO_SINT, VT.getSimpleVT(), Custom); + setOperationAction(ISD::FP_TO_UINT, VT.getSimpleVT(), Custom); + + if (Subtarget->isLittleEndian()) { + for (unsigned im = (unsigned)ISD::PRE_INC; + im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) { + setIndexedLoadAction(im, VT.getSimpleVT(), Legal); + setIndexedStoreAction(im, VT.getSimpleVT(), Legal); + } + } } -MachineBasicBlock * -AArch64TargetLowering::emitAtomicCmpSwap(MachineInstr *MI, - MachineBasicBlock *BB, - unsigned Size) const { - unsigned dest = MI->getOperand(0).getReg(); - unsigned ptr = MI->getOperand(1).getReg(); - unsigned oldval = MI->getOperand(2).getReg(); - unsigned newval = MI->getOperand(3).getReg(); - AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(4).getImm()); - const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); - DebugLoc dl = MI->getDebugLoc(); - - MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); - const TargetRegisterClass *TRCsp; - TRCsp = Size == 8 ? 
&AArch64::GPR64xspRegClass : &AArch64::GPR32wspRegClass; - - unsigned ldrOpc, strOpc; - getExclusiveOperation(Size, Ord, ldrOpc, strOpc); - - MachineFunction *MF = BB->getParent(); - const BasicBlock *LLVM_BB = BB->getBasicBlock(); - MachineFunction::iterator It = BB; - ++It; // insert the new blocks after the current block - - MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB); - MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB); - MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); - MF->insert(It, loop1MBB); - MF->insert(It, loop2MBB); - MF->insert(It, exitMBB); - - // Transfer the remainder of BB and its successor edges to exitMBB. - exitMBB->splice(exitMBB->begin(), BB, - std::next(MachineBasicBlock::iterator(MI)), BB->end()); - exitMBB->transferSuccessorsAndUpdatePHIs(BB); - - // thisMBB: - // ... - // fallthrough --> loop1MBB - BB->addSuccessor(loop1MBB); - - // loop1MBB: - // ldxr dest, [ptr] - // cmp dest, oldval - // b.ne exitMBB - BB = loop1MBB; - BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr); - - unsigned CmpOp = Size == 8 ? AArch64::CMPxx_lsl : AArch64::CMPww_lsl; - MRI.constrainRegClass(dest, TRCsp); - BuildMI(BB, dl, TII->get(CmpOp)) - .addReg(dest).addReg(oldval).addImm(0); - BuildMI(BB, dl, TII->get(AArch64::Bcc)) - .addImm(A64CC::NE).addMBB(exitMBB); - BB->addSuccessor(loop2MBB); - BB->addSuccessor(exitMBB); - - // loop2MBB: - // strex stxr_status, newval, [ptr] - // cbnz stxr_status, loop1MBB - BB = loop2MBB; - unsigned stxr_status = MRI.createVirtualRegister(&AArch64::GPR32RegClass); - MRI.constrainRegClass(stxr_status, &AArch64::GPR32wspRegClass); - - BuildMI(BB, dl, TII->get(strOpc), stxr_status).addReg(newval).addReg(ptr); - BuildMI(BB, dl, TII->get(AArch64::CBNZw)) - .addReg(stxr_status).addMBB(loop1MBB); - BB->addSuccessor(loop1MBB); - BB->addSuccessor(exitMBB); - - // exitMBB: - // ... - BB = exitMBB; - - MI->eraseFromParent(); // The instruction is gone now. - - return BB; +void AArch64TargetLowering::addDRTypeForNEON(MVT VT) { + addRegisterClass(VT, &AArch64::FPR64RegClass); + addTypeForNEON(VT, MVT::v2i32); +} + +void AArch64TargetLowering::addQRTypeForNEON(MVT VT) { + addRegisterClass(VT, &AArch64::FPR128RegClass); + addTypeForNEON(VT, MVT::v4i32); +} + +EVT AArch64TargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const { + if (!VT.isVector()) + return MVT::i32; + return VT.changeVectorElementTypeToInteger(); +} + +/// computeKnownBitsForTargetNode - Determine which of the bits specified in +/// Mask are known to be either zero or one and return them in the +/// KnownZero/KnownOne bitsets. 
+void AArch64TargetLowering::computeKnownBitsForTargetNode( + const SDValue Op, APInt &KnownZero, APInt &KnownOne, + const SelectionDAG &DAG, unsigned Depth) const { + switch (Op.getOpcode()) { + default: + break; + case AArch64ISD::CSEL: { + APInt KnownZero2, KnownOne2; + DAG.computeKnownBits(Op->getOperand(0), KnownZero, KnownOne, Depth + 1); + DAG.computeKnownBits(Op->getOperand(1), KnownZero2, KnownOne2, Depth + 1); + KnownZero &= KnownZero2; + KnownOne &= KnownOne2; + break; + } + case ISD::INTRINSIC_W_CHAIN: { + ConstantSDNode *CN = cast<ConstantSDNode>(Op->getOperand(1)); + Intrinsic::ID IntID = static_cast<Intrinsic::ID>(CN->getZExtValue()); + switch (IntID) { + default: return; + case Intrinsic::aarch64_ldaxr: + case Intrinsic::aarch64_ldxr: { + unsigned BitWidth = KnownOne.getBitWidth(); + EVT VT = cast<MemIntrinsicSDNode>(Op)->getMemoryVT(); + unsigned MemBits = VT.getScalarType().getSizeInBits(); + KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits); + return; + } + } + break; + } + case ISD::INTRINSIC_WO_CHAIN: + case ISD::INTRINSIC_VOID: { + unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); + switch (IntNo) { + default: + break; + case Intrinsic::aarch64_neon_umaxv: + case Intrinsic::aarch64_neon_uminv: { + // Figure out the datatype of the vector operand. The UMINV instruction + // will zero extend the result, so we can mark as known zero all the + // bits larger than the element datatype. 32-bit or larget doesn't need + // this as those are legal types and will be handled by isel directly. + MVT VT = Op.getOperand(1).getValueType().getSimpleVT(); + unsigned BitWidth = KnownZero.getBitWidth(); + if (VT == MVT::v8i8 || VT == MVT::v16i8) { + assert(BitWidth >= 8 && "Unexpected width!"); + APInt Mask = APInt::getHighBitsSet(BitWidth, BitWidth - 8); + KnownZero |= Mask; + } else if (VT == MVT::v4i16 || VT == MVT::v8i16) { + assert(BitWidth >= 16 && "Unexpected width!"); + APInt Mask = APInt::getHighBitsSet(BitWidth, BitWidth - 16); + KnownZero |= Mask; + } + break; + } break; + } + } + } +} + +MVT AArch64TargetLowering::getScalarShiftAmountTy(EVT LHSTy) const { + return MVT::i64; +} + +unsigned AArch64TargetLowering::getMaximalGlobalOffset() const { + // FIXME: On AArch64, this depends on the type. + // Basically, the addressable offsets are o to 4095 * Ty.getSizeInBytes(). + // and the offset has to be a multiple of the related size in bytes. 
+ return 4095; +} + +FastISel * +AArch64TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo, + const TargetLibraryInfo *libInfo) const { + return AArch64::createFastISel(funcInfo, libInfo); +} + +const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const { + switch (Opcode) { + default: + return nullptr; + case AArch64ISD::CALL: return "AArch64ISD::CALL"; + case AArch64ISD::ADRP: return "AArch64ISD::ADRP"; + case AArch64ISD::ADDlow: return "AArch64ISD::ADDlow"; + case AArch64ISD::LOADgot: return "AArch64ISD::LOADgot"; + case AArch64ISD::RET_FLAG: return "AArch64ISD::RET_FLAG"; + case AArch64ISD::BRCOND: return "AArch64ISD::BRCOND"; + case AArch64ISD::CSEL: return "AArch64ISD::CSEL"; + case AArch64ISD::FCSEL: return "AArch64ISD::FCSEL"; + case AArch64ISD::CSINV: return "AArch64ISD::CSINV"; + case AArch64ISD::CSNEG: return "AArch64ISD::CSNEG"; + case AArch64ISD::CSINC: return "AArch64ISD::CSINC"; + case AArch64ISD::THREAD_POINTER: return "AArch64ISD::THREAD_POINTER"; + case AArch64ISD::TLSDESC_CALL: return "AArch64ISD::TLSDESC_CALL"; + case AArch64ISD::ADC: return "AArch64ISD::ADC"; + case AArch64ISD::SBC: return "AArch64ISD::SBC"; + case AArch64ISD::ADDS: return "AArch64ISD::ADDS"; + case AArch64ISD::SUBS: return "AArch64ISD::SUBS"; + case AArch64ISD::ADCS: return "AArch64ISD::ADCS"; + case AArch64ISD::SBCS: return "AArch64ISD::SBCS"; + case AArch64ISD::ANDS: return "AArch64ISD::ANDS"; + case AArch64ISD::FCMP: return "AArch64ISD::FCMP"; + case AArch64ISD::FMIN: return "AArch64ISD::FMIN"; + case AArch64ISD::FMAX: return "AArch64ISD::FMAX"; + case AArch64ISD::DUP: return "AArch64ISD::DUP"; + case AArch64ISD::DUPLANE8: return "AArch64ISD::DUPLANE8"; + case AArch64ISD::DUPLANE16: return "AArch64ISD::DUPLANE16"; + case AArch64ISD::DUPLANE32: return "AArch64ISD::DUPLANE32"; + case AArch64ISD::DUPLANE64: return "AArch64ISD::DUPLANE64"; + case AArch64ISD::MOVI: return "AArch64ISD::MOVI"; + case AArch64ISD::MOVIshift: return "AArch64ISD::MOVIshift"; + case AArch64ISD::MOVIedit: return "AArch64ISD::MOVIedit"; + case AArch64ISD::MOVImsl: return "AArch64ISD::MOVImsl"; + case AArch64ISD::FMOV: return "AArch64ISD::FMOV"; + case AArch64ISD::MVNIshift: return "AArch64ISD::MVNIshift"; + case AArch64ISD::MVNImsl: return "AArch64ISD::MVNImsl"; + case AArch64ISD::BICi: return "AArch64ISD::BICi"; + case AArch64ISD::ORRi: return "AArch64ISD::ORRi"; + case AArch64ISD::BSL: return "AArch64ISD::BSL"; + case AArch64ISD::NEG: return "AArch64ISD::NEG"; + case AArch64ISD::EXTR: return "AArch64ISD::EXTR"; + case AArch64ISD::ZIP1: return "AArch64ISD::ZIP1"; + case AArch64ISD::ZIP2: return "AArch64ISD::ZIP2"; + case AArch64ISD::UZP1: return "AArch64ISD::UZP1"; + case AArch64ISD::UZP2: return "AArch64ISD::UZP2"; + case AArch64ISD::TRN1: return "AArch64ISD::TRN1"; + case AArch64ISD::TRN2: return "AArch64ISD::TRN2"; + case AArch64ISD::REV16: return "AArch64ISD::REV16"; + case AArch64ISD::REV32: return "AArch64ISD::REV32"; + case AArch64ISD::REV64: return "AArch64ISD::REV64"; + case AArch64ISD::EXT: return "AArch64ISD::EXT"; + case AArch64ISD::VSHL: return "AArch64ISD::VSHL"; + case AArch64ISD::VLSHR: return "AArch64ISD::VLSHR"; + case AArch64ISD::VASHR: return "AArch64ISD::VASHR"; + case AArch64ISD::CMEQ: return "AArch64ISD::CMEQ"; + case AArch64ISD::CMGE: return "AArch64ISD::CMGE"; + case AArch64ISD::CMGT: return "AArch64ISD::CMGT"; + case AArch64ISD::CMHI: return "AArch64ISD::CMHI"; + case AArch64ISD::CMHS: return "AArch64ISD::CMHS"; + case AArch64ISD::FCMEQ: return "AArch64ISD::FCMEQ"; + case 
AArch64ISD::FCMGE: return "AArch64ISD::FCMGE"; + case AArch64ISD::FCMGT: return "AArch64ISD::FCMGT"; + case AArch64ISD::CMEQz: return "AArch64ISD::CMEQz"; + case AArch64ISD::CMGEz: return "AArch64ISD::CMGEz"; + case AArch64ISD::CMGTz: return "AArch64ISD::CMGTz"; + case AArch64ISD::CMLEz: return "AArch64ISD::CMLEz"; + case AArch64ISD::CMLTz: return "AArch64ISD::CMLTz"; + case AArch64ISD::FCMEQz: return "AArch64ISD::FCMEQz"; + case AArch64ISD::FCMGEz: return "AArch64ISD::FCMGEz"; + case AArch64ISD::FCMGTz: return "AArch64ISD::FCMGTz"; + case AArch64ISD::FCMLEz: return "AArch64ISD::FCMLEz"; + case AArch64ISD::FCMLTz: return "AArch64ISD::FCMLTz"; + case AArch64ISD::NOT: return "AArch64ISD::NOT"; + case AArch64ISD::BIT: return "AArch64ISD::BIT"; + case AArch64ISD::CBZ: return "AArch64ISD::CBZ"; + case AArch64ISD::CBNZ: return "AArch64ISD::CBNZ"; + case AArch64ISD::TBZ: return "AArch64ISD::TBZ"; + case AArch64ISD::TBNZ: return "AArch64ISD::TBNZ"; + case AArch64ISD::TC_RETURN: return "AArch64ISD::TC_RETURN"; + case AArch64ISD::SITOF: return "AArch64ISD::SITOF"; + case AArch64ISD::UITOF: return "AArch64ISD::UITOF"; + case AArch64ISD::SQSHL_I: return "AArch64ISD::SQSHL_I"; + case AArch64ISD::UQSHL_I: return "AArch64ISD::UQSHL_I"; + case AArch64ISD::SRSHR_I: return "AArch64ISD::SRSHR_I"; + case AArch64ISD::URSHR_I: return "AArch64ISD::URSHR_I"; + case AArch64ISD::SQSHLU_I: return "AArch64ISD::SQSHLU_I"; + case AArch64ISD::WrapperLarge: return "AArch64ISD::WrapperLarge"; + case AArch64ISD::LD2post: return "AArch64ISD::LD2post"; + case AArch64ISD::LD3post: return "AArch64ISD::LD3post"; + case AArch64ISD::LD4post: return "AArch64ISD::LD4post"; + case AArch64ISD::ST2post: return "AArch64ISD::ST2post"; + case AArch64ISD::ST3post: return "AArch64ISD::ST3post"; + case AArch64ISD::ST4post: return "AArch64ISD::ST4post"; + case AArch64ISD::LD1x2post: return "AArch64ISD::LD1x2post"; + case AArch64ISD::LD1x3post: return "AArch64ISD::LD1x3post"; + case AArch64ISD::LD1x4post: return "AArch64ISD::LD1x4post"; + case AArch64ISD::ST1x2post: return "AArch64ISD::ST1x2post"; + case AArch64ISD::ST1x3post: return "AArch64ISD::ST1x3post"; + case AArch64ISD::ST1x4post: return "AArch64ISD::ST1x4post"; + case AArch64ISD::LD1DUPpost: return "AArch64ISD::LD1DUPpost"; + case AArch64ISD::LD2DUPpost: return "AArch64ISD::LD2DUPpost"; + case AArch64ISD::LD3DUPpost: return "AArch64ISD::LD3DUPpost"; + case AArch64ISD::LD4DUPpost: return "AArch64ISD::LD4DUPpost"; + case AArch64ISD::LD1LANEpost: return "AArch64ISD::LD1LANEpost"; + case AArch64ISD::LD2LANEpost: return "AArch64ISD::LD2LANEpost"; + case AArch64ISD::LD3LANEpost: return "AArch64ISD::LD3LANEpost"; + case AArch64ISD::LD4LANEpost: return "AArch64ISD::LD4LANEpost"; + case AArch64ISD::ST2LANEpost: return "AArch64ISD::ST2LANEpost"; + case AArch64ISD::ST3LANEpost: return "AArch64ISD::ST3LANEpost"; + case AArch64ISD::ST4LANEpost: return "AArch64ISD::ST4LANEpost"; + } } MachineBasicBlock * AArch64TargetLowering::EmitF128CSEL(MachineInstr *MI, MachineBasicBlock *MBB) const { - // We materialise the F128CSEL pseudo-instruction using conditional branches - // and loads, giving an instruciton sequence like: - // str q0, [sp] - // b.ne IfTrue - // b Finish - // IfTrue: - // str q1, [sp] - // Finish: - // ldr q0, [sp] - // - // Using virtual registers would probably not be beneficial since COPY - // instructions are expensive for f128 (there's no actual instruction to - // implement them). - // - // An alternative would be to do an integer-CSEL on some address. 
E.g.: - // mov x0, sp - // add x1, sp, #16 - // str q0, [x0] - // str q1, [x1] - // csel x0, x0, x1, ne - // ldr q0, [x0] - // - // It's unclear which approach is actually optimal. + // We materialise the F128CSEL pseudo-instruction as some control flow and a + // phi node: + + // OrigBB: + // [... previous instrs leading to comparison ...] + // b.ne TrueBB + // b EndBB + // TrueBB: + // ; Fallthrough + // EndBB: + // Dest = PHI [IfTrue, TrueBB], [IfFalse, OrigBB] + const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); MachineFunction *MF = MBB->getParent(); const BasicBlock *LLVM_BB = MBB->getBasicBlock(); @@ -906,49 +792,24 @@ AArch64TargetLowering::EmitF128CSEL(MachineInstr *MI, MBB->end()); EndBB->transferSuccessorsAndUpdatePHIs(MBB); - // We need somewhere to store the f128 value needed. - int ScratchFI = MF->getFrameInfo()->CreateSpillStackObject(16, 16); - - // [... start of incoming MBB ...] - // str qIFFALSE, [sp] - // b.cc IfTrue - // b Done - BuildMI(MBB, DL, TII->get(AArch64::LSFP128_STR)) - .addReg(IfFalseReg) - .addFrameIndex(ScratchFI) - .addImm(0); - BuildMI(MBB, DL, TII->get(AArch64::Bcc)) - .addImm(CondCode) - .addMBB(TrueBB); - BuildMI(MBB, DL, TII->get(AArch64::Bimm)) - .addMBB(EndBB); + BuildMI(MBB, DL, TII->get(AArch64::Bcc)).addImm(CondCode).addMBB(TrueBB); + BuildMI(MBB, DL, TII->get(AArch64::B)).addMBB(EndBB); MBB->addSuccessor(TrueBB); MBB->addSuccessor(EndBB); + // TrueBB falls through to the end. + TrueBB->addSuccessor(EndBB); + if (!NZCVKilled) { - // NZCV is live-through TrueBB. TrueBB->addLiveIn(AArch64::NZCV); EndBB->addLiveIn(AArch64::NZCV); } - // IfTrue: - // str qIFTRUE, [sp] - BuildMI(TrueBB, DL, TII->get(AArch64::LSFP128_STR)) - .addReg(IfTrueReg) - .addFrameIndex(ScratchFI) - .addImm(0); - - // Note: fallthrough. We can rely on LLVM adding a branch if it reorders the - // blocks. - TrueBB->addSuccessor(EndBB); - - // Done: - // ldr qDEST, [sp] - // [... rest of incoming MBB ...] 
- MachineInstr *StartOfEnd = EndBB->begin(); - BuildMI(*EndBB, StartOfEnd, DL, TII->get(AArch64::LSFP128_LDR), DestReg) - .addFrameIndex(ScratchFI) - .addImm(0); + BuildMI(*EndBB, EndBB->begin(), DL, TII->get(AArch64::PHI), DestReg) + .addReg(IfTrueReg) + .addMBB(TrueBB) + .addReg(IfFalseReg) + .addMBB(MBB); MI->eraseFromParent(); return EndBB; @@ -956,853 +817,1140 @@ AArch64TargetLowering::EmitF128CSEL(MachineInstr *MI, MachineBasicBlock * AArch64TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, - MachineBasicBlock *MBB) const { + MachineBasicBlock *BB) const { switch (MI->getOpcode()) { - default: llvm_unreachable("Unhandled instruction with custom inserter"); + default: +#ifndef NDEBUG + MI->dump(); +#endif + assert(0 && "Unexpected instruction for custom inserter!"); + break; + case AArch64::F128CSEL: - return EmitF128CSEL(MI, MBB); - case AArch64::ATOMIC_LOAD_ADD_I8: - return emitAtomicBinary(MI, MBB, 1, AArch64::ADDwww_lsl); - case AArch64::ATOMIC_LOAD_ADD_I16: - return emitAtomicBinary(MI, MBB, 2, AArch64::ADDwww_lsl); - case AArch64::ATOMIC_LOAD_ADD_I32: - return emitAtomicBinary(MI, MBB, 4, AArch64::ADDwww_lsl); - case AArch64::ATOMIC_LOAD_ADD_I64: - return emitAtomicBinary(MI, MBB, 8, AArch64::ADDxxx_lsl); - - case AArch64::ATOMIC_LOAD_SUB_I8: - return emitAtomicBinary(MI, MBB, 1, AArch64::SUBwww_lsl); - case AArch64::ATOMIC_LOAD_SUB_I16: - return emitAtomicBinary(MI, MBB, 2, AArch64::SUBwww_lsl); - case AArch64::ATOMIC_LOAD_SUB_I32: - return emitAtomicBinary(MI, MBB, 4, AArch64::SUBwww_lsl); - case AArch64::ATOMIC_LOAD_SUB_I64: - return emitAtomicBinary(MI, MBB, 8, AArch64::SUBxxx_lsl); - - case AArch64::ATOMIC_LOAD_AND_I8: - return emitAtomicBinary(MI, MBB, 1, AArch64::ANDwww_lsl); - case AArch64::ATOMIC_LOAD_AND_I16: - return emitAtomicBinary(MI, MBB, 2, AArch64::ANDwww_lsl); - case AArch64::ATOMIC_LOAD_AND_I32: - return emitAtomicBinary(MI, MBB, 4, AArch64::ANDwww_lsl); - case AArch64::ATOMIC_LOAD_AND_I64: - return emitAtomicBinary(MI, MBB, 8, AArch64::ANDxxx_lsl); - - case AArch64::ATOMIC_LOAD_OR_I8: - return emitAtomicBinary(MI, MBB, 1, AArch64::ORRwww_lsl); - case AArch64::ATOMIC_LOAD_OR_I16: - return emitAtomicBinary(MI, MBB, 2, AArch64::ORRwww_lsl); - case AArch64::ATOMIC_LOAD_OR_I32: - return emitAtomicBinary(MI, MBB, 4, AArch64::ORRwww_lsl); - case AArch64::ATOMIC_LOAD_OR_I64: - return emitAtomicBinary(MI, MBB, 8, AArch64::ORRxxx_lsl); - - case AArch64::ATOMIC_LOAD_XOR_I8: - return emitAtomicBinary(MI, MBB, 1, AArch64::EORwww_lsl); - case AArch64::ATOMIC_LOAD_XOR_I16: - return emitAtomicBinary(MI, MBB, 2, AArch64::EORwww_lsl); - case AArch64::ATOMIC_LOAD_XOR_I32: - return emitAtomicBinary(MI, MBB, 4, AArch64::EORwww_lsl); - case AArch64::ATOMIC_LOAD_XOR_I64: - return emitAtomicBinary(MI, MBB, 8, AArch64::EORxxx_lsl); - - case AArch64::ATOMIC_LOAD_NAND_I8: - return emitAtomicBinary(MI, MBB, 1, AArch64::BICwww_lsl); - case AArch64::ATOMIC_LOAD_NAND_I16: - return emitAtomicBinary(MI, MBB, 2, AArch64::BICwww_lsl); - case AArch64::ATOMIC_LOAD_NAND_I32: - return emitAtomicBinary(MI, MBB, 4, AArch64::BICwww_lsl); - case AArch64::ATOMIC_LOAD_NAND_I64: - return emitAtomicBinary(MI, MBB, 8, AArch64::BICxxx_lsl); - - case AArch64::ATOMIC_LOAD_MIN_I8: - return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_sxtb, A64CC::GT); - case AArch64::ATOMIC_LOAD_MIN_I16: - return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_sxth, A64CC::GT); - case AArch64::ATOMIC_LOAD_MIN_I32: - return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::GT); - case 
AArch64::ATOMIC_LOAD_MIN_I64: - return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::GT); - - case AArch64::ATOMIC_LOAD_MAX_I8: - return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_sxtb, A64CC::LT); - case AArch64::ATOMIC_LOAD_MAX_I16: - return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_sxth, A64CC::LT); - case AArch64::ATOMIC_LOAD_MAX_I32: - return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::LT); - case AArch64::ATOMIC_LOAD_MAX_I64: - return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::LT); - - case AArch64::ATOMIC_LOAD_UMIN_I8: - return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_uxtb, A64CC::HI); - case AArch64::ATOMIC_LOAD_UMIN_I16: - return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_uxth, A64CC::HI); - case AArch64::ATOMIC_LOAD_UMIN_I32: - return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::HI); - case AArch64::ATOMIC_LOAD_UMIN_I64: - return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::HI); - - case AArch64::ATOMIC_LOAD_UMAX_I8: - return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_uxtb, A64CC::LO); - case AArch64::ATOMIC_LOAD_UMAX_I16: - return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_uxth, A64CC::LO); - case AArch64::ATOMIC_LOAD_UMAX_I32: - return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::LO); - case AArch64::ATOMIC_LOAD_UMAX_I64: - return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::LO); - - case AArch64::ATOMIC_SWAP_I8: - return emitAtomicBinary(MI, MBB, 1, 0); - case AArch64::ATOMIC_SWAP_I16: - return emitAtomicBinary(MI, MBB, 2, 0); - case AArch64::ATOMIC_SWAP_I32: - return emitAtomicBinary(MI, MBB, 4, 0); - case AArch64::ATOMIC_SWAP_I64: - return emitAtomicBinary(MI, MBB, 8, 0); - - case AArch64::ATOMIC_CMP_SWAP_I8: - return emitAtomicCmpSwap(MI, MBB, 1); - case AArch64::ATOMIC_CMP_SWAP_I16: - return emitAtomicCmpSwap(MI, MBB, 2); - case AArch64::ATOMIC_CMP_SWAP_I32: - return emitAtomicCmpSwap(MI, MBB, 4); - case AArch64::ATOMIC_CMP_SWAP_I64: - return emitAtomicCmpSwap(MI, MBB, 8); + return EmitF128CSEL(MI, BB); + + case TargetOpcode::STACKMAP: + case TargetOpcode::PATCHPOINT: + return emitPatchPoint(MI, BB); } + llvm_unreachable("Unexpected instruction for custom inserter!"); } +//===----------------------------------------------------------------------===// +// AArch64 Lowering private implementation. 
+//===----------------------------------------------------------------------===// -const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const { - switch (Opcode) { - case AArch64ISD::BR_CC: return "AArch64ISD::BR_CC"; - case AArch64ISD::Call: return "AArch64ISD::Call"; - case AArch64ISD::FPMOV: return "AArch64ISD::FPMOV"; - case AArch64ISD::GOTLoad: return "AArch64ISD::GOTLoad"; - case AArch64ISD::BFI: return "AArch64ISD::BFI"; - case AArch64ISD::EXTR: return "AArch64ISD::EXTR"; - case AArch64ISD::Ret: return "AArch64ISD::Ret"; - case AArch64ISD::SBFX: return "AArch64ISD::SBFX"; - case AArch64ISD::SELECT_CC: return "AArch64ISD::SELECT_CC"; - case AArch64ISD::SETCC: return "AArch64ISD::SETCC"; - case AArch64ISD::TC_RETURN: return "AArch64ISD::TC_RETURN"; - case AArch64ISD::THREAD_POINTER: return "AArch64ISD::THREAD_POINTER"; - case AArch64ISD::TLSDESCCALL: return "AArch64ISD::TLSDESCCALL"; - case AArch64ISD::WrapperLarge: return "AArch64ISD::WrapperLarge"; - case AArch64ISD::WrapperSmall: return "AArch64ISD::WrapperSmall"; - - case AArch64ISD::NEON_MOVIMM: - return "AArch64ISD::NEON_MOVIMM"; - case AArch64ISD::NEON_MVNIMM: - return "AArch64ISD::NEON_MVNIMM"; - case AArch64ISD::NEON_FMOVIMM: - return "AArch64ISD::NEON_FMOVIMM"; - case AArch64ISD::NEON_CMP: - return "AArch64ISD::NEON_CMP"; - case AArch64ISD::NEON_CMPZ: - return "AArch64ISD::NEON_CMPZ"; - case AArch64ISD::NEON_TST: - return "AArch64ISD::NEON_TST"; - case AArch64ISD::NEON_QSHLs: - return "AArch64ISD::NEON_QSHLs"; - case AArch64ISD::NEON_QSHLu: - return "AArch64ISD::NEON_QSHLu"; - case AArch64ISD::NEON_VDUP: - return "AArch64ISD::NEON_VDUP"; - case AArch64ISD::NEON_VDUPLANE: - return "AArch64ISD::NEON_VDUPLANE"; - case AArch64ISD::NEON_REV16: - return "AArch64ISD::NEON_REV16"; - case AArch64ISD::NEON_REV32: - return "AArch64ISD::NEON_REV32"; - case AArch64ISD::NEON_REV64: - return "AArch64ISD::NEON_REV64"; - case AArch64ISD::NEON_UZP1: - return "AArch64ISD::NEON_UZP1"; - case AArch64ISD::NEON_UZP2: - return "AArch64ISD::NEON_UZP2"; - case AArch64ISD::NEON_ZIP1: - return "AArch64ISD::NEON_ZIP1"; - case AArch64ISD::NEON_ZIP2: - return "AArch64ISD::NEON_ZIP2"; - case AArch64ISD::NEON_TRN1: - return "AArch64ISD::NEON_TRN1"; - case AArch64ISD::NEON_TRN2: - return "AArch64ISD::NEON_TRN2"; - case AArch64ISD::NEON_LD1_UPD: - return "AArch64ISD::NEON_LD1_UPD"; - case AArch64ISD::NEON_LD2_UPD: - return "AArch64ISD::NEON_LD2_UPD"; - case AArch64ISD::NEON_LD3_UPD: - return "AArch64ISD::NEON_LD3_UPD"; - case AArch64ISD::NEON_LD4_UPD: - return "AArch64ISD::NEON_LD4_UPD"; - case AArch64ISD::NEON_ST1_UPD: - return "AArch64ISD::NEON_ST1_UPD"; - case AArch64ISD::NEON_ST2_UPD: - return "AArch64ISD::NEON_ST2_UPD"; - case AArch64ISD::NEON_ST3_UPD: - return "AArch64ISD::NEON_ST3_UPD"; - case AArch64ISD::NEON_ST4_UPD: - return "AArch64ISD::NEON_ST4_UPD"; - case AArch64ISD::NEON_LD1x2_UPD: - return "AArch64ISD::NEON_LD1x2_UPD"; - case AArch64ISD::NEON_LD1x3_UPD: - return "AArch64ISD::NEON_LD1x3_UPD"; - case AArch64ISD::NEON_LD1x4_UPD: - return "AArch64ISD::NEON_LD1x4_UPD"; - case AArch64ISD::NEON_ST1x2_UPD: - return "AArch64ISD::NEON_ST1x2_UPD"; - case AArch64ISD::NEON_ST1x3_UPD: - return "AArch64ISD::NEON_ST1x3_UPD"; - case AArch64ISD::NEON_ST1x4_UPD: - return "AArch64ISD::NEON_ST1x4_UPD"; - case AArch64ISD::NEON_LD2DUP: - return "AArch64ISD::NEON_LD2DUP"; - case AArch64ISD::NEON_LD3DUP: - return "AArch64ISD::NEON_LD3DUP"; - case AArch64ISD::NEON_LD4DUP: - return "AArch64ISD::NEON_LD4DUP"; - case AArch64ISD::NEON_LD2DUP_UPD: - return 
"AArch64ISD::NEON_LD2DUP_UPD"; - case AArch64ISD::NEON_LD3DUP_UPD: - return "AArch64ISD::NEON_LD3DUP_UPD"; - case AArch64ISD::NEON_LD4DUP_UPD: - return "AArch64ISD::NEON_LD4DUP_UPD"; - case AArch64ISD::NEON_LD2LN_UPD: - return "AArch64ISD::NEON_LD2LN_UPD"; - case AArch64ISD::NEON_LD3LN_UPD: - return "AArch64ISD::NEON_LD3LN_UPD"; - case AArch64ISD::NEON_LD4LN_UPD: - return "AArch64ISD::NEON_LD4LN_UPD"; - case AArch64ISD::NEON_ST2LN_UPD: - return "AArch64ISD::NEON_ST2LN_UPD"; - case AArch64ISD::NEON_ST3LN_UPD: - return "AArch64ISD::NEON_ST3LN_UPD"; - case AArch64ISD::NEON_ST4LN_UPD: - return "AArch64ISD::NEON_ST4LN_UPD"; - case AArch64ISD::NEON_VEXTRACT: - return "AArch64ISD::NEON_VEXTRACT"; +//===----------------------------------------------------------------------===// +// Lowering Code +//===----------------------------------------------------------------------===// + +/// changeIntCCToAArch64CC - Convert a DAG integer condition code to an AArch64 +/// CC +static AArch64CC::CondCode changeIntCCToAArch64CC(ISD::CondCode CC) { + switch (CC) { default: - return NULL; + llvm_unreachable("Unknown condition code!"); + case ISD::SETNE: + return AArch64CC::NE; + case ISD::SETEQ: + return AArch64CC::EQ; + case ISD::SETGT: + return AArch64CC::GT; + case ISD::SETGE: + return AArch64CC::GE; + case ISD::SETLT: + return AArch64CC::LT; + case ISD::SETLE: + return AArch64CC::LE; + case ISD::SETUGT: + return AArch64CC::HI; + case ISD::SETUGE: + return AArch64CC::HS; + case ISD::SETULT: + return AArch64CC::LO; + case ISD::SETULE: + return AArch64CC::LS; } } -static const uint16_t AArch64FPRArgRegs[] = { - AArch64::Q0, AArch64::Q1, AArch64::Q2, AArch64::Q3, - AArch64::Q4, AArch64::Q5, AArch64::Q6, AArch64::Q7 -}; -static const unsigned NumFPRArgRegs = llvm::array_lengthof(AArch64FPRArgRegs); +/// changeFPCCToAArch64CC - Convert a DAG fp condition code to an AArch64 CC. +static void changeFPCCToAArch64CC(ISD::CondCode CC, + AArch64CC::CondCode &CondCode, + AArch64CC::CondCode &CondCode2) { + CondCode2 = AArch64CC::AL; + switch (CC) { + default: + llvm_unreachable("Unknown FP condition!"); + case ISD::SETEQ: + case ISD::SETOEQ: + CondCode = AArch64CC::EQ; + break; + case ISD::SETGT: + case ISD::SETOGT: + CondCode = AArch64CC::GT; + break; + case ISD::SETGE: + case ISD::SETOGE: + CondCode = AArch64CC::GE; + break; + case ISD::SETOLT: + CondCode = AArch64CC::MI; + break; + case ISD::SETOLE: + CondCode = AArch64CC::LS; + break; + case ISD::SETONE: + CondCode = AArch64CC::MI; + CondCode2 = AArch64CC::GT; + break; + case ISD::SETO: + CondCode = AArch64CC::VC; + break; + case ISD::SETUO: + CondCode = AArch64CC::VS; + break; + case ISD::SETUEQ: + CondCode = AArch64CC::EQ; + CondCode2 = AArch64CC::VS; + break; + case ISD::SETUGT: + CondCode = AArch64CC::HI; + break; + case ISD::SETUGE: + CondCode = AArch64CC::PL; + break; + case ISD::SETLT: + case ISD::SETULT: + CondCode = AArch64CC::LT; + break; + case ISD::SETLE: + case ISD::SETULE: + CondCode = AArch64CC::LE; + break; + case ISD::SETNE: + case ISD::SETUNE: + CondCode = AArch64CC::NE; + break; + } +} -static const uint16_t AArch64ArgRegs[] = { - AArch64::X0, AArch64::X1, AArch64::X2, AArch64::X3, - AArch64::X4, AArch64::X5, AArch64::X6, AArch64::X7 -}; -static const unsigned NumArgRegs = llvm::array_lengthof(AArch64ArgRegs); +/// changeVectorFPCCToAArch64CC - Convert a DAG fp condition code to an AArch64 +/// CC usable with the vector instructions. 
Fewer operations are available +/// without a real NZCV register, so we have to use less efficient combinations +/// to get the same effect. +static void changeVectorFPCCToAArch64CC(ISD::CondCode CC, + AArch64CC::CondCode &CondCode, + AArch64CC::CondCode &CondCode2, + bool &Invert) { + Invert = false; + switch (CC) { + default: + // Mostly the scalar mappings work fine. + changeFPCCToAArch64CC(CC, CondCode, CondCode2); + break; + case ISD::SETUO: + Invert = true; // Fallthrough + case ISD::SETO: + CondCode = AArch64CC::MI; + CondCode2 = AArch64CC::GE; + break; + case ISD::SETUEQ: + case ISD::SETULT: + case ISD::SETULE: + case ISD::SETUGT: + case ISD::SETUGE: + // All of the compare-mask comparisons are ordered, but we can switch + // between the two by a double inversion. E.g. ULE == !OGT. + Invert = true; + changeFPCCToAArch64CC(getSetCCInverse(CC, false), CondCode, CondCode2); + break; + } +} + +static bool isLegalArithImmed(uint64_t C) { + // Matches AArch64DAGToDAGISel::SelectArithImmed(). + return (C >> 12 == 0) || ((C & 0xFFFULL) == 0 && C >> 24 == 0); +} -static bool CC_AArch64NoMoreRegs(unsigned ValNo, MVT ValVT, MVT LocVT, - CCValAssign::LocInfo LocInfo, - ISD::ArgFlagsTy ArgFlags, CCState &State) { - // Mark all remaining general purpose registers as allocated. We don't - // backtrack: if (for example) an i128 gets put on the stack, no subsequent - // i64 will go in registers (C.11). - for (unsigned i = 0; i < NumArgRegs; ++i) - State.AllocateReg(AArch64ArgRegs[i]); +static SDValue emitComparison(SDValue LHS, SDValue RHS, ISD::CondCode CC, + SDLoc dl, SelectionDAG &DAG) { + EVT VT = LHS.getValueType(); + + if (VT.isFloatingPoint()) + return DAG.getNode(AArch64ISD::FCMP, dl, VT, LHS, RHS); + + // The CMP instruction is just an alias for SUBS, and representing it as + // SUBS means that it's possible to get CSE with subtract operations. + // A later phase can perform the optimization of setting the destination + // register to WZR/XZR if it ends up being unused. + unsigned Opcode = AArch64ISD::SUBS; + + if (RHS.getOpcode() == ISD::SUB && isa<ConstantSDNode>(RHS.getOperand(0)) && + cast<ConstantSDNode>(RHS.getOperand(0))->getZExtValue() == 0 && + (CC == ISD::SETEQ || CC == ISD::SETNE)) { + // We'd like to combine a (CMP op1, (sub 0, op2) into a CMN instruction on + // the grounds that "op1 - (-op2) == op1 + op2". However, the C and V flags + // can be set differently by this operation. It comes down to whether + // "SInt(~op2)+1 == SInt(~op2+1)" (and the same for UInt). If they are then + // everything is fine. If not then the optimization is wrong. Thus general + // comparisons are only valid if op2 != 0. + + // So, finally, the only LLVM-native comparisons that don't mention C and V + // are SETEQ and SETNE. They're the only ones we can safely use CMN for in + // the absence of information about op2. + Opcode = AArch64ISD::ADDS; + RHS = RHS.getOperand(1); + } else if (LHS.getOpcode() == ISD::AND && isa<ConstantSDNode>(RHS) && + cast<ConstantSDNode>(RHS)->getZExtValue() == 0 && + !isUnsignedIntSetCC(CC)) { + // Similarly, (CMP (and X, Y), 0) can be implemented with a TST + // (a.k.a. ANDS) except that the flags are only guaranteed to work for one + // of the signed comparisons. 
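+    // For example, (seteq (and w0, #0x1), 0) can be selected as
+    //   tst w0, #0x1
+    //   b.eq <target>
+    // whereas an unsigned compare such as SETUGT cannot safely reuse the
+    // flags produced by ANDS here.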
+ Opcode = AArch64ISD::ANDS; + RHS = LHS.getOperand(1); + LHS = LHS.getOperand(0); + } - return false; + return DAG.getNode(Opcode, dl, DAG.getVTList(VT, MVT::i32), LHS, RHS) + .getValue(1); } -#include "AArch64GenCallingConv.inc" +static SDValue getAArch64Cmp(SDValue LHS, SDValue RHS, ISD::CondCode CC, + SDValue &AArch64cc, SelectionDAG &DAG, SDLoc dl) { + if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) { + EVT VT = RHS.getValueType(); + uint64_t C = RHSC->getZExtValue(); + if (!isLegalArithImmed(C)) { + // Constant does not fit, try adjusting it by one? + switch (CC) { + default: + break; + case ISD::SETLT: + case ISD::SETGE: + if ((VT == MVT::i32 && C != 0x80000000 && + isLegalArithImmed((uint32_t)(C - 1))) || + (VT == MVT::i64 && C != 0x80000000ULL && + isLegalArithImmed(C - 1ULL))) { + CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT; + C = (VT == MVT::i32) ? (uint32_t)(C - 1) : C - 1; + RHS = DAG.getConstant(C, VT); + } + break; + case ISD::SETULT: + case ISD::SETUGE: + if ((VT == MVT::i32 && C != 0 && + isLegalArithImmed((uint32_t)(C - 1))) || + (VT == MVT::i64 && C != 0ULL && isLegalArithImmed(C - 1ULL))) { + CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT; + C = (VT == MVT::i32) ? (uint32_t)(C - 1) : C - 1; + RHS = DAG.getConstant(C, VT); + } + break; + case ISD::SETLE: + case ISD::SETGT: + if ((VT == MVT::i32 && C != 0x7fffffff && + isLegalArithImmed((uint32_t)(C + 1))) || + (VT == MVT::i64 && C != 0x7ffffffffffffffULL && + isLegalArithImmed(C + 1ULL))) { + CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE; + C = (VT == MVT::i32) ? (uint32_t)(C + 1) : C + 1; + RHS = DAG.getConstant(C, VT); + } + break; + case ISD::SETULE: + case ISD::SETUGT: + if ((VT == MVT::i32 && C != 0xffffffff && + isLegalArithImmed((uint32_t)(C + 1))) || + (VT == MVT::i64 && C != 0xfffffffffffffffULL && + isLegalArithImmed(C + 1ULL))) { + CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE; + C = (VT == MVT::i32) ? (uint32_t)(C + 1) : C + 1; + RHS = DAG.getConstant(C, VT); + } + break; + } + } + } -CCAssignFn *AArch64TargetLowering::CCAssignFnForNode(CallingConv::ID CC) const { + SDValue Cmp = emitComparison(LHS, RHS, CC, dl, DAG); + AArch64CC::CondCode AArch64CC = changeIntCCToAArch64CC(CC); + AArch64cc = DAG.getConstant(AArch64CC, MVT::i32); + return Cmp; +} - switch(CC) { - default: llvm_unreachable("Unsupported calling convention"); - case CallingConv::Fast: - case CallingConv::C: - return CC_A64_APCS; +static std::pair<SDValue, SDValue> +getAArch64XALUOOp(AArch64CC::CondCode &CC, SDValue Op, SelectionDAG &DAG) { + assert((Op.getValueType() == MVT::i32 || Op.getValueType() == MVT::i64) && + "Unsupported value type"); + SDValue Value, Overflow; + SDLoc DL(Op); + SDValue LHS = Op.getOperand(0); + SDValue RHS = Op.getOperand(1); + unsigned Opc = 0; + switch (Op.getOpcode()) { + default: + llvm_unreachable("Unknown overflow instruction!"); + case ISD::SADDO: + Opc = AArch64ISD::ADDS; + CC = AArch64CC::VS; + break; + case ISD::UADDO: + Opc = AArch64ISD::ADDS; + CC = AArch64CC::HS; + break; + case ISD::SSUBO: + Opc = AArch64ISD::SUBS; + CC = AArch64CC::VS; + break; + case ISD::USUBO: + Opc = AArch64ISD::SUBS; + CC = AArch64CC::LO; + break; + // Multiply needs a little bit extra work. + case ISD::SMULO: + case ISD::UMULO: { + CC = AArch64CC::NE; + bool IsSigned = (Op.getOpcode() == ISD::SMULO) ? true : false; + if (Op.getValueType() == MVT::i32) { + unsigned ExtendOpc = IsSigned ? 
ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; + // For a 32 bit multiply with overflow check we want the instruction + // selector to generate a widening multiply (SMADDL/UMADDL). For that we + // need to generate the following pattern: + // (i64 add 0, (i64 mul (i64 sext|zext i32 %a), (i64 sext|zext i32 %b)) + LHS = DAG.getNode(ExtendOpc, DL, MVT::i64, LHS); + RHS = DAG.getNode(ExtendOpc, DL, MVT::i64, RHS); + SDValue Mul = DAG.getNode(ISD::MUL, DL, MVT::i64, LHS, RHS); + SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Mul, + DAG.getConstant(0, MVT::i64)); + // On AArch64 the upper 32 bits are always zero extended for a 32 bit + // operation. We need to clear out the upper 32 bits, because we used a + // widening multiply that wrote all 64 bits. In the end this should be a + // noop. + Value = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Add); + if (IsSigned) { + // The signed overflow check requires more than just a simple check for + // any bit set in the upper 32 bits of the result. These bits could be + // just the sign bits of a negative number. To perform the overflow + // check we have to arithmetic shift right the 32nd bit of the result by + // 31 bits. Then we compare the result to the upper 32 bits. + SDValue UpperBits = DAG.getNode(ISD::SRL, DL, MVT::i64, Add, + DAG.getConstant(32, MVT::i64)); + UpperBits = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, UpperBits); + SDValue LowerBits = DAG.getNode(ISD::SRA, DL, MVT::i32, Value, + DAG.getConstant(31, MVT::i64)); + // It is important that LowerBits is last, otherwise the arithmetic + // shift will not be folded into the compare (SUBS). + SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32); + Overflow = DAG.getNode(AArch64ISD::SUBS, DL, VTs, UpperBits, LowerBits) + .getValue(1); + } else { + // The overflow check for unsigned multiply is easy. We only need to + // check if any of the upper 32 bits are set. This can be done with a + // CMP (shifted register). For that we need to generate the following + // pattern: + // (i64 AArch64ISD::SUBS i64 0, (i64 srl i64 %Mul, i64 32) + SDValue UpperBits = DAG.getNode(ISD::SRL, DL, MVT::i64, Mul, + DAG.getConstant(32, MVT::i64)); + SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i32); + Overflow = + DAG.getNode(AArch64ISD::SUBS, DL, VTs, DAG.getConstant(0, MVT::i64), + UpperBits).getValue(1); + } + break; + } + assert(Op.getValueType() == MVT::i64 && "Expected an i64 value type"); + // For the 64 bit multiply + Value = DAG.getNode(ISD::MUL, DL, MVT::i64, LHS, RHS); + if (IsSigned) { + SDValue UpperBits = DAG.getNode(ISD::MULHS, DL, MVT::i64, LHS, RHS); + SDValue LowerBits = DAG.getNode(ISD::SRA, DL, MVT::i64, Value, + DAG.getConstant(63, MVT::i64)); + // It is important that LowerBits is last, otherwise the arithmetic + // shift will not be folded into the compare (SUBS). + SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i32); + Overflow = DAG.getNode(AArch64ISD::SUBS, DL, VTs, UpperBits, LowerBits) + .getValue(1); + } else { + SDValue UpperBits = DAG.getNode(ISD::MULHU, DL, MVT::i64, LHS, RHS); + SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i32); + Overflow = + DAG.getNode(AArch64ISD::SUBS, DL, VTs, DAG.getConstant(0, MVT::i64), + UpperBits).getValue(1); + } + break; + } + } // switch (...) + + if (Opc) { + SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::i32); + + // Emit the AArch64 operation with overflow check. 
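+    // For instance, an i32 'saddo' reaches this point with Opc == ADDS and
+    // CC == VS; LowerXALUO below then materialises the overflow bit from the
+    // flags with a CSINC on the inverted condition.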
+ Value = DAG.getNode(Opc, DL, VTs, LHS, RHS); + Overflow = Value.getValue(1); } + return std::make_pair(Value, Overflow); } -void -AArch64TargetLowering::SaveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, - SDLoc DL, SDValue &Chain) const { - MachineFunction &MF = DAG.getMachineFunction(); - MachineFrameInfo *MFI = MF.getFrameInfo(); - AArch64MachineFunctionInfo *FuncInfo - = MF.getInfo<AArch64MachineFunctionInfo>(); +SDValue AArch64TargetLowering::LowerF128Call(SDValue Op, SelectionDAG &DAG, + RTLIB::Libcall Call) const { + SmallVector<SDValue, 2> Ops; + for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) + Ops.push_back(Op.getOperand(i)); - SmallVector<SDValue, 8> MemOps; + return makeLibCall(DAG, Call, MVT::f128, &Ops[0], Ops.size(), false, + SDLoc(Op)).first; +} - unsigned FirstVariadicGPR = CCInfo.getFirstUnallocated(AArch64ArgRegs, - NumArgRegs); - unsigned FirstVariadicFPR = CCInfo.getFirstUnallocated(AArch64FPRArgRegs, - NumFPRArgRegs); +static SDValue LowerXOR(SDValue Op, SelectionDAG &DAG) { + SDValue Sel = Op.getOperand(0); + SDValue Other = Op.getOperand(1); - unsigned GPRSaveSize = 8 * (NumArgRegs - FirstVariadicGPR); - int GPRIdx = 0; - if (GPRSaveSize != 0) { - GPRIdx = MFI->CreateStackObject(GPRSaveSize, 8, false); + // If neither operand is a SELECT_CC, give up. + if (Sel.getOpcode() != ISD::SELECT_CC) + std::swap(Sel, Other); + if (Sel.getOpcode() != ISD::SELECT_CC) + return Op; - SDValue FIN = DAG.getFrameIndex(GPRIdx, getPointerTy()); + // The folding we want to perform is: + // (xor x, (select_cc a, b, cc, 0, -1) ) + // --> + // (csel x, (xor x, -1), cc ...) + // + // The latter will get matched to a CSINV instruction. - for (unsigned i = FirstVariadicGPR; i < NumArgRegs; ++i) { - unsigned VReg = MF.addLiveIn(AArch64ArgRegs[i], &AArch64::GPR64RegClass); - SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64); - SDValue Store = DAG.getStore(Val.getValue(1), DL, Val, FIN, - MachinePointerInfo::getStack(i * 8), - false, false, 0); - MemOps.push_back(Store); - FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), FIN, - DAG.getConstant(8, getPointerTy())); - } - } + ISD::CondCode CC = cast<CondCodeSDNode>(Sel.getOperand(4))->get(); + SDValue LHS = Sel.getOperand(0); + SDValue RHS = Sel.getOperand(1); + SDValue TVal = Sel.getOperand(2); + SDValue FVal = Sel.getOperand(3); + SDLoc dl(Sel); - if (getSubtarget()->hasFPARMv8()) { - unsigned FPRSaveSize = 16 * (NumFPRArgRegs - FirstVariadicFPR); - int FPRIdx = 0; - // According to the AArch64 Procedure Call Standard, section B.1/B.3, we - // can omit a register save area if we know we'll never use registers of - // that class. - if (FPRSaveSize != 0) { - FPRIdx = MFI->CreateStackObject(FPRSaveSize, 16, false); + // FIXME: This could be generalized to non-integer comparisons. 
+ if (LHS.getValueType() != MVT::i32 && LHS.getValueType() != MVT::i64) + return Op; - SDValue FIN = DAG.getFrameIndex(FPRIdx, getPointerTy()); + ConstantSDNode *CFVal = dyn_cast<ConstantSDNode>(FVal); + ConstantSDNode *CTVal = dyn_cast<ConstantSDNode>(TVal); - for (unsigned i = FirstVariadicFPR; i < NumFPRArgRegs; ++i) { - unsigned VReg = MF.addLiveIn(AArch64FPRArgRegs[i], - &AArch64::FPR128RegClass); - SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f128); - SDValue Store = DAG.getStore(Val.getValue(1), DL, Val, FIN, - MachinePointerInfo::getStack(i * 16), - false, false, 0); - MemOps.push_back(Store); - FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), FIN, - DAG.getConstant(16, getPointerTy())); - } - } - FuncInfo->setVariadicFPRIdx(FPRIdx); - FuncInfo->setVariadicFPRSize(FPRSaveSize); + // The the values aren't constants, this isn't the pattern we're looking for. + if (!CFVal || !CTVal) + return Op; + + // We can commute the SELECT_CC by inverting the condition. This + // might be needed to make this fit into a CSINV pattern. + if (CTVal->isAllOnesValue() && CFVal->isNullValue()) { + std::swap(TVal, FVal); + std::swap(CTVal, CFVal); + CC = ISD::getSetCCInverse(CC, true); } - unsigned StackOffset = RoundUpToAlignment(CCInfo.getNextStackOffset(), 8); - int StackIdx = MFI->CreateFixedObject(8, StackOffset, true); + // If the constants line up, perform the transform! + if (CTVal->isNullValue() && CFVal->isAllOnesValue()) { + SDValue CCVal; + SDValue Cmp = getAArch64Cmp(LHS, RHS, CC, CCVal, DAG, dl); - FuncInfo->setVariadicStackIdx(StackIdx); - FuncInfo->setVariadicGPRIdx(GPRIdx); - FuncInfo->setVariadicGPRSize(GPRSaveSize); + FVal = Other; + TVal = DAG.getNode(ISD::XOR, dl, Other.getValueType(), Other, + DAG.getConstant(-1ULL, Other.getValueType())); - if (!MemOps.empty()) { - Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, &MemOps[0], - MemOps.size()); + return DAG.getNode(AArch64ISD::CSEL, dl, Sel.getValueType(), FVal, TVal, + CCVal, Cmp); } + + return Op; } +static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) { + EVT VT = Op.getValueType(); -SDValue -AArch64TargetLowering::LowerFormalArguments(SDValue Chain, - CallingConv::ID CallConv, bool isVarArg, - const SmallVectorImpl<ISD::InputArg> &Ins, - SDLoc dl, SelectionDAG &DAG, - SmallVectorImpl<SDValue> &InVals) const { - MachineFunction &MF = DAG.getMachineFunction(); - AArch64MachineFunctionInfo *FuncInfo - = MF.getInfo<AArch64MachineFunctionInfo>(); - MachineFrameInfo *MFI = MF.getFrameInfo(); - bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt; + // Let legalize expand this if it isn't a legal type yet. 
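+  // For a legal type this maps straight onto the flag-setting forms: e.g. an
+  // i128 addition that legalization has already split into i64 ADDC/ADDE
+  // pairs becomes ADDS (which sets the carry) followed by ADCS (which
+  // consumes it).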
+ if (!DAG.getTargetLoweringInfo().isTypeLegal(VT)) + return SDValue(); - SmallVector<CCValAssign, 16> ArgLocs; - CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), - getTargetMachine(), ArgLocs, *DAG.getContext()); - CCInfo.AnalyzeFormalArguments(Ins, CCAssignFnForNode(CallConv)); + SDVTList VTs = DAG.getVTList(VT, MVT::i32); - SmallVector<SDValue, 16> ArgValues; + unsigned Opc; + bool ExtraOp = false; + switch (Op.getOpcode()) { + default: + assert(0 && "Invalid code"); + case ISD::ADDC: + Opc = AArch64ISD::ADDS; + break; + case ISD::SUBC: + Opc = AArch64ISD::SUBS; + break; + case ISD::ADDE: + Opc = AArch64ISD::ADCS; + ExtraOp = true; + break; + case ISD::SUBE: + Opc = AArch64ISD::SBCS; + ExtraOp = true; + break; + } - SDValue ArgValue; - for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { - CCValAssign &VA = ArgLocs[i]; - ISD::ArgFlagsTy Flags = Ins[i].Flags; + if (!ExtraOp) + return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0), Op.getOperand(1)); + return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0), Op.getOperand(1), + Op.getOperand(2)); +} - if (Flags.isByVal()) { - // Byval is used for small structs and HFAs in the PCS, but the system - // should work in a non-compliant manner for larger structs. - EVT PtrTy = getPointerTy(); - int Size = Flags.getByValSize(); - unsigned NumRegs = (Size + 7) / 8; +static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) { + // Let legalize expand this if it isn't a legal type yet. + if (!DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType())) + return SDValue(); - uint32_t BEAlign = 0; - if (Size < 8 && !getSubtarget()->isLittle()) - BEAlign = 8-Size; - unsigned FrameIdx = MFI->CreateFixedObject(8 * NumRegs, - VA.getLocMemOffset() + BEAlign, - false); - SDValue FrameIdxN = DAG.getFrameIndex(FrameIdx, PtrTy); - InVals.push_back(FrameIdxN); + AArch64CC::CondCode CC; + // The actual operation that sets the overflow or carry flag. + SDValue Value, Overflow; + std::tie(Value, Overflow) = getAArch64XALUOOp(CC, Op, DAG); - continue; - } else if (VA.isRegLoc()) { - MVT RegVT = VA.getLocVT(); - const TargetRegisterClass *RC = getRegClassFor(RegVT); - unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); + // We use 0 and 1 as false and true values. + SDValue TVal = DAG.getConstant(1, MVT::i32); + SDValue FVal = DAG.getConstant(0, MVT::i32); - ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT); - } else { // VA.isRegLoc() - assert(VA.isMemLoc()); + // We use an inverted condition, because the conditional select is inverted + // too. This will allow it to be selected to a single instruction: + // CSINC Wd, WZR, WZR, invert(cond). + SDValue CCVal = DAG.getConstant(getInvertedCondCode(CC), MVT::i32); + Overflow = DAG.getNode(AArch64ISD::CSEL, SDLoc(Op), MVT::i32, FVal, TVal, + CCVal, Overflow); - int FI = MFI->CreateFixedObject(VA.getLocVT().getSizeInBits()/8, - VA.getLocMemOffset(), true); + SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32); + return DAG.getNode(ISD::MERGE_VALUES, SDLoc(Op), VTs, Value, Overflow); +} - SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); - ArgValue = DAG.getLoad(VA.getLocVT(), dl, Chain, FIN, - MachinePointerInfo::getFixedStack(FI), - false, false, false, 0); +// Prefetch operands are: +// 1: Address to prefetch +// 2: bool isWrite +// 3: int locality (0 = no locality ... 
3 = extreme locality) +// 4: bool isDataCache +static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG) { + SDLoc DL(Op); + unsigned IsWrite = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue(); + unsigned Locality = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue(); + // The data thing is not used. + // unsigned isData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue(); + + bool IsStream = !Locality; + // When the locality number is set + if (Locality) { + // The front-end should have filtered out the out-of-range values + assert(Locality <= 3 && "Prefetch locality out-of-range"); + // The locality degree is the opposite of the cache speed. + // Put the number the other way around. + // The encoding starts at 0 for level 1 + Locality = 3 - Locality; + } + // built the mask value encoding the expected behavior. + unsigned PrfOp = (IsWrite << 4) | // Load/Store bit + (Locality << 1) | // Cache level bits + (unsigned)IsStream; // Stream bit + return DAG.getNode(AArch64ISD::PREFETCH, DL, MVT::Other, Op.getOperand(0), + DAG.getConstant(PrfOp, MVT::i32), Op.getOperand(1)); +} - } +SDValue AArch64TargetLowering::LowerFP_EXTEND(SDValue Op, + SelectionDAG &DAG) const { + assert(Op.getValueType() == MVT::f128 && "Unexpected lowering"); - switch (VA.getLocInfo()) { - default: llvm_unreachable("Unknown loc info!"); - case CCValAssign::Full: break; - case CCValAssign::BCvt: - ArgValue = DAG.getNode(ISD::BITCAST,dl, VA.getValVT(), ArgValue); - break; - case CCValAssign::SExt: - case CCValAssign::ZExt: - case CCValAssign::AExt: - case CCValAssign::FPExt: { - unsigned DestSize = VA.getValVT().getSizeInBits(); - unsigned DestSubReg; - - switch (DestSize) { - case 8: DestSubReg = AArch64::sub_8; break; - case 16: DestSubReg = AArch64::sub_16; break; - case 32: DestSubReg = AArch64::sub_32; break; - case 64: DestSubReg = AArch64::sub_64; break; - default: llvm_unreachable("Unexpected argument promotion"); - } + RTLIB::Libcall LC; + LC = RTLIB::getFPEXT(Op.getOperand(0).getValueType(), Op.getValueType()); - ArgValue = SDValue(DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, dl, - VA.getValVT(), ArgValue, - DAG.getTargetConstant(DestSubReg, MVT::i32)), - 0); - break; - } - } + return LowerF128Call(Op, DAG, LC); +} - InVals.push_back(ArgValue); +SDValue AArch64TargetLowering::LowerFP_ROUND(SDValue Op, + SelectionDAG &DAG) const { + if (Op.getOperand(0).getValueType() != MVT::f128) { + // It's legal except when f128 is involved + return Op; } - if (isVarArg) - SaveVarArgRegisters(CCInfo, DAG, dl, Chain); + RTLIB::Libcall LC; + LC = RTLIB::getFPROUND(Op.getOperand(0).getValueType(), Op.getValueType()); - unsigned StackArgSize = CCInfo.getNextStackOffset(); - if (DoesCalleeRestoreStack(CallConv, TailCallOpt)) { - // This is a non-standard ABI so by fiat I say we're allowed to make full - // use of the stack area to be popped, which must be aligned to 16 bytes in - // any case: - StackArgSize = RoundUpToAlignment(StackArgSize, 16); + // FP_ROUND node has a second operand indicating whether it is known to be + // precise. That doesn't take part in the LibCall so we can't directly use + // LowerF128Call. + SDValue SrcVal = Op.getOperand(0); + return makeLibCall(DAG, LC, Op.getValueType(), &SrcVal, 1, + /*isSigned*/ false, SDLoc(Op)).first; +} - // If we're expected to restore the stack (e.g. fastcc) then we'll be adding - // a multiple of 16. 
- FuncInfo->setArgumentStackToRestore(StackArgSize); +static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) { + // Warning: We maintain cost tables in AArch64TargetTransformInfo.cpp. + // Any additional optimization in this function should be recorded + // in the cost tables. + EVT InVT = Op.getOperand(0).getValueType(); + EVT VT = Op.getValueType(); - // This realignment carries over to the available bytes below. Our own - // callers will guarantee the space is free by giving an aligned value to - // CALLSEQ_START. + // FP_TO_XINT conversion from the same type are legal. + if (VT.getSizeInBits() == InVT.getSizeInBits()) + return Op; + + if (InVT == MVT::v2f64 || InVT == MVT::v4f32) { + SDLoc dl(Op); + SDValue Cv = + DAG.getNode(Op.getOpcode(), dl, InVT.changeVectorElementTypeToInteger(), + Op.getOperand(0)); + return DAG.getNode(ISD::TRUNCATE, dl, VT, Cv); + } else if (InVT == MVT::v2f32) { + SDLoc dl(Op); + SDValue Ext = DAG.getNode(ISD::FP_EXTEND, dl, MVT::v2f64, Op.getOperand(0)); + return DAG.getNode(Op.getOpcode(), dl, VT, Ext); } - // Even if we're not expected to free up the space, it's useful to know how - // much is there while considering tail calls (because we can reuse it). - FuncInfo->setBytesInStackArgArea(StackArgSize); - return Chain; + // Type changing conversions are illegal. + return SDValue(); } -SDValue -AArch64TargetLowering::LowerReturn(SDValue Chain, - CallingConv::ID CallConv, bool isVarArg, - const SmallVectorImpl<ISD::OutputArg> &Outs, - const SmallVectorImpl<SDValue> &OutVals, - SDLoc dl, SelectionDAG &DAG) const { - // CCValAssign - represent the assignment of the return value to a location. - SmallVector<CCValAssign, 16> RVLocs; +SDValue AArch64TargetLowering::LowerFP_TO_INT(SDValue Op, + SelectionDAG &DAG) const { + if (Op.getOperand(0).getValueType().isVector()) + return LowerVectorFP_TO_INT(Op, DAG); - // CCState - Info about the registers and stack slots. - CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), - getTargetMachine(), RVLocs, *DAG.getContext()); + if (Op.getOperand(0).getValueType() != MVT::f128) { + // It's legal except when f128 is involved + return Op; + } - // Analyze outgoing return values. - CCInfo.AnalyzeReturn(Outs, CCAssignFnForNode(CallConv)); + RTLIB::Libcall LC; + if (Op.getOpcode() == ISD::FP_TO_SINT) + LC = RTLIB::getFPTOSINT(Op.getOperand(0).getValueType(), Op.getValueType()); + else + LC = RTLIB::getFPTOUINT(Op.getOperand(0).getValueType(), Op.getValueType()); - SDValue Flag; - SmallVector<SDValue, 4> RetOps(1, Chain); + SmallVector<SDValue, 2> Ops; + for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) + Ops.push_back(Op.getOperand(i)); - for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) { - // PCS: "If the type, T, of the result of a function is such that - // void func(T arg) would require that arg be passed as a value in a - // register (or set of registers) according to the rules in 5.4, then the - // result is returned in the same registers as would be used for such an - // argument. - // - // Otherwise, the caller shall reserve a block of memory of sufficient - // size and alignment to hold the result. The address of the memory block - // shall be passed as an additional argument to the function in x8." - // - // This is implemented in two places. The register-return values are dealt - // with here, more complex returns are passed as an sret parameter, which - // means we don't have to worry about it during actual return. 
- CCValAssign &VA = RVLocs[i]; - assert(VA.isRegLoc() && "Only register-returns should be created by PCS"); + return makeLibCall(DAG, LC, Op.getValueType(), &Ops[0], Ops.size(), false, + SDLoc(Op)).first; +} +static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) { + // Warning: We maintain cost tables in AArch64TargetTransformInfo.cpp. + // Any additional optimization in this function should be recorded + // in the cost tables. + EVT VT = Op.getValueType(); + SDLoc dl(Op); + SDValue In = Op.getOperand(0); + EVT InVT = In.getValueType(); - SDValue Arg = OutVals[i]; + // v2i32 to v2f32 is legal. + if (VT == MVT::v2f32 && InVT == MVT::v2i32) + return Op; - // There's no convenient note in the ABI about this as there is for normal - // arguments, but it says return values are passed in the same registers as - // an argument would be. I believe that includes the comments about - // unspecified higher bits, putting the burden of widening on the *caller* - // for return values. - switch (VA.getLocInfo()) { - default: llvm_unreachable("Unknown loc info"); - case CCValAssign::Full: break; - case CCValAssign::SExt: - case CCValAssign::ZExt: - case CCValAssign::AExt: - // Floating-point values should only be extended when they're going into - // memory, which can't happen here so an integer extend is acceptable. - Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg); - break; - case CCValAssign::BCvt: - Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg); - break; - } + // This function only handles v2f64 outputs. + if (VT == MVT::v2f64) { + // Extend the input argument to a v2i64 that we can feed into the + // floating point conversion. Zero or sign extend based on whether + // we're doing a signed or unsigned float conversion. + unsigned Opc = + Op.getOpcode() == ISD::UINT_TO_FP ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND; + assert(Op.getNumOperands() == 1 && "FP conversions take one argument"); + SDValue Promoted = DAG.getNode(Opc, dl, MVT::v2i64, Op.getOperand(0)); + return DAG.getNode(Op.getOpcode(), dl, Op.getValueType(), Promoted); + } - Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag); - Flag = Chain.getValue(1); - RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); + // Scalarize v2i64 to v2f32 conversions. + std::vector<SDValue> BuildVectorOps; + for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) { + SDValue Sclr = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, In, + DAG.getConstant(i, MVT::i64)); + Sclr = DAG.getNode(Op->getOpcode(), dl, MVT::f32, Sclr); + BuildVectorOps.push_back(Sclr); } - RetOps[0] = Chain; // Update chain. + return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, BuildVectorOps); +} - // Add the flag if we have it. - if (Flag.getNode()) - RetOps.push_back(Flag); +SDValue AArch64TargetLowering::LowerINT_TO_FP(SDValue Op, + SelectionDAG &DAG) const { + if (Op.getValueType().isVector()) + return LowerVectorINT_TO_FP(Op, DAG); - return DAG.getNode(AArch64ISD::Ret, dl, MVT::Other, - &RetOps[0], RetOps.size()); -} + // i128 conversions are libcalls. + if (Op.getOperand(0).getValueType() == MVT::i128) + return SDValue(); + + // Other conversions are legal, unless it's to the completely software-based + // fp128. + if (Op.getValueType() != MVT::f128) + return Op; -unsigned AArch64TargetLowering::getByValTypeAlignment(Type *Ty) const { - // This is a new backend. For anything more precise than this a FE should - // set an explicit alignment. 
- return 4; + RTLIB::Libcall LC; + if (Op.getOpcode() == ISD::SINT_TO_FP) + LC = RTLIB::getSINTTOFP(Op.getOperand(0).getValueType(), Op.getValueType()); + else + LC = RTLIB::getUINTTOFP(Op.getOperand(0).getValueType(), Op.getValueType()); + + return LowerF128Call(Op, DAG, LC); } -SDValue -AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI, - SmallVectorImpl<SDValue> &InVals) const { - SelectionDAG &DAG = CLI.DAG; - SDLoc &dl = CLI.DL; - SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; - SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; - SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; - SDValue Chain = CLI.Chain; - SDValue Callee = CLI.Callee; - bool &IsTailCall = CLI.IsTailCall; - CallingConv::ID CallConv = CLI.CallConv; - bool IsVarArg = CLI.IsVarArg; +SDValue AArch64TargetLowering::LowerFSINCOS(SDValue Op, + SelectionDAG &DAG) const { + // For iOS, we want to call an alternative entry point: __sincos_stret, + // which returns the values in two S / D registers. + SDLoc dl(Op); + SDValue Arg = Op.getOperand(0); + EVT ArgVT = Arg.getValueType(); + Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); - MachineFunction &MF = DAG.getMachineFunction(); - AArch64MachineFunctionInfo *FuncInfo - = MF.getInfo<AArch64MachineFunctionInfo>(); - bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt; - bool IsStructRet = !Outs.empty() && Outs[0].Flags.isSRet(); - bool IsSibCall = false; + ArgListTy Args; + ArgListEntry Entry; - if (IsTailCall) { - IsTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, - IsVarArg, IsStructRet, MF.getFunction()->hasStructRetAttr(), - Outs, OutVals, Ins, DAG); + Entry.Node = Arg; + Entry.Ty = ArgTy; + Entry.isSExt = false; + Entry.isZExt = false; + Args.push_back(Entry); - // A sibling call is one where we're under the usual C ABI and not planning - // to change that but can still do a tail call: - if (!TailCallOpt && IsTailCall) - IsSibCall = true; - } + const char *LibcallName = + (ArgVT == MVT::f64) ? "__sincos_stret" : "__sincosf_stret"; + SDValue Callee = DAG.getExternalSymbol(LibcallName, getPointerTy()); - SmallVector<CCValAssign, 16> ArgLocs; - CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), - getTargetMachine(), ArgLocs, *DAG.getContext()); - CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForNode(CallConv)); + StructType *RetTy = StructType::get(ArgTy, ArgTy, NULL); + TargetLowering::CallLoweringInfo CLI(DAG); + CLI.setDebugLoc(dl).setChain(DAG.getEntryNode()) + .setCallee(CallingConv::Fast, RetTy, Callee, &Args, 0); - // On AArch64 (and all other architectures I'm aware of) the most this has to - // do is adjust the stack pointer. - unsigned NumBytes = RoundUpToAlignment(CCInfo.getNextStackOffset(), 16); - if (IsSibCall) { - // Since we're not changing the ABI to make this a tail call, the memory - // operands are already available in the caller's incoming argument space. 
- NumBytes = 0; + std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); + return CallResult.first; +} + +SDValue AArch64TargetLowering::LowerOperation(SDValue Op, + SelectionDAG &DAG) const { + switch (Op.getOpcode()) { + default: + llvm_unreachable("unimplemented operand"); + return SDValue(); + case ISD::GlobalAddress: + return LowerGlobalAddress(Op, DAG); + case ISD::GlobalTLSAddress: + return LowerGlobalTLSAddress(Op, DAG); + case ISD::SETCC: + return LowerSETCC(Op, DAG); + case ISD::BR_CC: + return LowerBR_CC(Op, DAG); + case ISD::SELECT: + return LowerSELECT(Op, DAG); + case ISD::SELECT_CC: + return LowerSELECT_CC(Op, DAG); + case ISD::JumpTable: + return LowerJumpTable(Op, DAG); + case ISD::ConstantPool: + return LowerConstantPool(Op, DAG); + case ISD::BlockAddress: + return LowerBlockAddress(Op, DAG); + case ISD::VASTART: + return LowerVASTART(Op, DAG); + case ISD::VACOPY: + return LowerVACOPY(Op, DAG); + case ISD::VAARG: + return LowerVAARG(Op, DAG); + case ISD::ADDC: + case ISD::ADDE: + case ISD::SUBC: + case ISD::SUBE: + return LowerADDC_ADDE_SUBC_SUBE(Op, DAG); + case ISD::SADDO: + case ISD::UADDO: + case ISD::SSUBO: + case ISD::USUBO: + case ISD::SMULO: + case ISD::UMULO: + return LowerXALUO(Op, DAG); + case ISD::FADD: + return LowerF128Call(Op, DAG, RTLIB::ADD_F128); + case ISD::FSUB: + return LowerF128Call(Op, DAG, RTLIB::SUB_F128); + case ISD::FMUL: + return LowerF128Call(Op, DAG, RTLIB::MUL_F128); + case ISD::FDIV: + return LowerF128Call(Op, DAG, RTLIB::DIV_F128); + case ISD::FP_ROUND: + return LowerFP_ROUND(Op, DAG); + case ISD::FP_EXTEND: + return LowerFP_EXTEND(Op, DAG); + case ISD::FRAMEADDR: + return LowerFRAMEADDR(Op, DAG); + case ISD::RETURNADDR: + return LowerRETURNADDR(Op, DAG); + case ISD::INSERT_VECTOR_ELT: + return LowerINSERT_VECTOR_ELT(Op, DAG); + case ISD::EXTRACT_VECTOR_ELT: + return LowerEXTRACT_VECTOR_ELT(Op, DAG); + case ISD::BUILD_VECTOR: + return LowerBUILD_VECTOR(Op, DAG); + case ISD::VECTOR_SHUFFLE: + return LowerVECTOR_SHUFFLE(Op, DAG); + case ISD::EXTRACT_SUBVECTOR: + return LowerEXTRACT_SUBVECTOR(Op, DAG); + case ISD::SRA: + case ISD::SRL: + case ISD::SHL: + return LowerVectorSRA_SRL_SHL(Op, DAG); + case ISD::SHL_PARTS: + return LowerShiftLeftParts(Op, DAG); + case ISD::SRL_PARTS: + case ISD::SRA_PARTS: + return LowerShiftRightParts(Op, DAG); + case ISD::CTPOP: + return LowerCTPOP(Op, DAG); + case ISD::FCOPYSIGN: + return LowerFCOPYSIGN(Op, DAG); + case ISD::AND: + return LowerVectorAND(Op, DAG); + case ISD::OR: + return LowerVectorOR(Op, DAG); + case ISD::XOR: + return LowerXOR(Op, DAG); + case ISD::PREFETCH: + return LowerPREFETCH(Op, DAG); + case ISD::SINT_TO_FP: + case ISD::UINT_TO_FP: + return LowerINT_TO_FP(Op, DAG); + case ISD::FP_TO_SINT: + case ISD::FP_TO_UINT: + return LowerFP_TO_INT(Op, DAG); + case ISD::FSINCOS: + return LowerFSINCOS(Op, DAG); } +} - // FPDiff is the byte offset of the call's argument area from the callee's. - // Stores to callee stack arguments will be placed in FixedStackSlots offset - // by this amount for a tail call. In a sibling call it must be 0 because the - // caller will deallocate the entire stack and the callee still expects its - // arguments to begin at SP+0. Completely unused for non-tail calls. - int FPDiff = 0; +/// getFunctionAlignment - Return the Log2 alignment of this function. 
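+/// A return value of 2 corresponds to a 4-byte boundary, the size of one
+/// AArch64 instruction.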
+unsigned AArch64TargetLowering::getFunctionAlignment(const Function *F) const {
+  return 2;
+}
-  if (IsTailCall && !IsSibCall) {
-    unsigned NumReusableBytes = FuncInfo->getBytesInStackArgArea();
+//===----------------------------------------------------------------------===//
+// Calling Convention Implementation
+//===----------------------------------------------------------------------===//
-    // FPDiff will be negative if this tail call requires more space than we
-    // would automatically have in our incoming argument space. Positive if we
-    // can actually shrink the stack.
-    FPDiff = NumReusableBytes - NumBytes;
+#include "AArch64GenCallingConv.inc"
-    // The stack pointer must be 16-byte aligned at all times it's used for a
-    // memory operation, which in practice means at *all* times and in
-    // particular across call boundaries. Therefore our own arguments started at
-    // a 16-byte aligned SP and the delta applied for the tail call should
-    // satisfy the same constraint.
-    assert(FPDiff % 16 == 0 && "unaligned stack on tail call");
+/// Selects the correct CCAssignFn for the given CallingConvention
+/// value.
+CCAssignFn *AArch64TargetLowering::CCAssignFnForCall(CallingConv::ID CC,
+                                                     bool IsVarArg) const {
+  switch (CC) {
+  default:
+    llvm_unreachable("Unsupported calling convention.");
+  case CallingConv::WebKit_JS:
+    return CC_AArch64_WebKit_JS;
+  case CallingConv::C:
+  case CallingConv::Fast:
+    if (!Subtarget->isTargetDarwin())
+      return CC_AArch64_AAPCS;
+    return IsVarArg ? CC_AArch64_DarwinPCS_VarArg : CC_AArch64_DarwinPCS;
   }
+}
-  if (!IsSibCall)
-    Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true),
-                                 dl);
-
-  SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, AArch64::XSP,
-                                        getPointerTy());
+SDValue AArch64TargetLowering::LowerFormalArguments(
+    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
+    const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL, SelectionDAG &DAG,
+    SmallVectorImpl<SDValue> &InVals) const {
+  MachineFunction &MF = DAG.getMachineFunction();
+  MachineFrameInfo *MFI = MF.getFrameInfo();
-  SmallVector<SDValue, 8> MemOpChains;
-  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
+  // Assign locations to all of the incoming arguments.
+  SmallVector<CCValAssign, 16> ArgLocs;
+  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+                 getTargetMachine(), ArgLocs, *DAG.getContext());
+  // At this point, Ins[].VT may already be promoted to i32. To correctly
+  // handle passing i8 as i8 instead of i32 on stack, we pass in both i32 and
+  // i8 to CC_AArch64_AAPCS with i32 being ValVT and i8 being LocVT.
+  // Since AnalyzeFormalArguments uses Ins[].VT for both ValVT and LocVT, here
+  // we use a special version of AnalyzeFormalArguments to pass in ValVT and
+  // LocVT.
+  unsigned NumArgs = Ins.size();
+  Function::const_arg_iterator CurOrigArg = MF.getFunction()->arg_begin();
+  unsigned CurArgIdx = 0;
+  for (unsigned i = 0; i != NumArgs; ++i) {
+    MVT ValVT = Ins[i].VT;
+    std::advance(CurOrigArg, Ins[i].OrigArgIndex - CurArgIdx);
+    CurArgIdx = Ins[i].OrigArgIndex;
+
+    // Get type of the original argument.
+    EVT ActualVT = getValueType(CurOrigArg->getType(), /*AllowUnknown*/ true);
+    MVT ActualMVT = ActualVT.isSimple() ? ActualVT.getSimpleVT() : MVT::Other;
+    // If ActualMVT is i1/i8/i16, we should set LocVT to i8/i8/i16.
+ if (ActualMVT == MVT::i1 || ActualMVT == MVT::i8) + ValVT = MVT::i8; + else if (ActualMVT == MVT::i16) + ValVT = MVT::i16; + + CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, /*IsVarArg=*/false); + bool Res = + AssignFn(i, ValVT, ValVT, CCValAssign::Full, Ins[i].Flags, CCInfo); + assert(!Res && "Call operand has unhandled type"); + (void)Res; + } + assert(ArgLocs.size() == Ins.size()); + SmallVector<SDValue, 16> ArgValues; for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { CCValAssign &VA = ArgLocs[i]; - ISD::ArgFlagsTy Flags = Outs[i].Flags; - SDValue Arg = OutVals[i]; - - // Callee does the actual widening, so all extensions just use an implicit - // definition of the rest of the Loc. Aesthetically, this would be nicer as - // an ANY_EXTEND, but that isn't valid for floating-point types and this - // alternative works on integer types too. - switch (VA.getLocInfo()) { - default: llvm_unreachable("Unknown loc info!"); - case CCValAssign::Full: break; - case CCValAssign::SExt: - case CCValAssign::ZExt: - case CCValAssign::AExt: - case CCValAssign::FPExt: { - unsigned SrcSize = VA.getValVT().getSizeInBits(); - unsigned SrcSubReg; - - switch (SrcSize) { - case 8: SrcSubReg = AArch64::sub_8; break; - case 16: SrcSubReg = AArch64::sub_16; break; - case 32: SrcSubReg = AArch64::sub_32; break; - case 64: SrcSubReg = AArch64::sub_64; break; - default: llvm_unreachable("Unexpected argument promotion"); - } - Arg = SDValue(DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl, - VA.getLocVT(), - DAG.getUNDEF(VA.getLocVT()), - Arg, - DAG.getTargetConstant(SrcSubReg, MVT::i32)), - 0); + if (Ins[i].Flags.isByVal()) { + // Byval is used for HFAs in the PCS, but the system should work in a + // non-compliant manner for larger structs. + EVT PtrTy = getPointerTy(); + int Size = Ins[i].Flags.getByValSize(); + unsigned NumRegs = (Size + 7) / 8; - break; - } - case CCValAssign::BCvt: - Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg); - break; - } + // FIXME: This works on big-endian for composite byvals, which are the common + // case. It should also work for fundamental types too. + unsigned FrameIdx = + MFI->CreateFixedObject(8 * NumRegs, VA.getLocMemOffset(), false); + SDValue FrameIdxN = DAG.getFrameIndex(FrameIdx, PtrTy); + InVals.push_back(FrameIdxN); - if (VA.isRegLoc()) { - // A normal register (sub-) argument. For now we just note it down because - // we want to copy things into registers as late as possible to avoid - // register-pressure (and possibly worse). - RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); continue; - } + } if (VA.isRegLoc()) { + // Arguments stored in registers. + EVT RegVT = VA.getLocVT(); + + SDValue ArgValue; + const TargetRegisterClass *RC; + + if (RegVT == MVT::i32) + RC = &AArch64::GPR32RegClass; + else if (RegVT == MVT::i64) + RC = &AArch64::GPR64RegClass; + else if (RegVT == MVT::f32) + RC = &AArch64::FPR32RegClass; + else if (RegVT == MVT::f64 || RegVT.is64BitVector()) + RC = &AArch64::FPR64RegClass; + else if (RegVT == MVT::f128 || RegVT.is128BitVector()) + RC = &AArch64::FPR128RegClass; + else + llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering"); - assert(VA.isMemLoc() && "unexpected argument location"); + // Transform the arguments in physical registers into virtual ones. + unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); + ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, RegVT); - SDValue DstAddr; - MachinePointerInfo DstInfo; - if (IsTailCall) { - uint32_t OpSize = Flags.isByVal() ? 
Flags.getByValSize() : - VA.getLocVT().getSizeInBits(); - OpSize = (OpSize + 7) / 8; - int32_t Offset = VA.getLocMemOffset() + FPDiff; - int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true); + // If this is an 8, 16 or 32-bit value, it is really passed promoted + // to 64 bits. Insert an assert[sz]ext to capture this, then + // truncate to the right size. + switch (VA.getLocInfo()) { + default: + llvm_unreachable("Unknown loc info!"); + case CCValAssign::Full: + break; + case CCValAssign::BCvt: + ArgValue = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), ArgValue); + break; + case CCValAssign::AExt: + case CCValAssign::SExt: + case CCValAssign::ZExt: + // SelectionDAGBuilder will insert appropriate AssertZExt & AssertSExt + // nodes after our lowering. + assert(RegVT == Ins[i].VT && "incorrect register location selected"); + break; + } - DstAddr = DAG.getFrameIndex(FI, getPointerTy()); - DstInfo = MachinePointerInfo::getFixedStack(FI); + InVals.push_back(ArgValue); + + } else { // VA.isRegLoc() + assert(VA.isMemLoc() && "CCValAssign is neither reg nor mem"); + unsigned ArgOffset = VA.getLocMemOffset(); + unsigned ArgSize = VA.getLocVT().getSizeInBits() / 8; - // Make sure any stack arguments overlapping with where we're storing are - // loaded before this eventual operation. Otherwise they'll be clobbered. - Chain = addTokenForArgument(Chain, DAG, MF.getFrameInfo(), FI); - } else { - uint32_t OpSize = Flags.isByVal() ? Flags.getByValSize()*8 : - VA.getLocVT().getSizeInBits(); - OpSize = (OpSize + 7) / 8; uint32_t BEAlign = 0; - if (OpSize < 8 && !getSubtarget()->isLittle()) - BEAlign = 8-OpSize; - SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() + BEAlign); + if (ArgSize < 8 && !Subtarget->isLittleEndian()) + BEAlign = 8 - ArgSize; - DstAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff); - DstInfo = MachinePointerInfo::getStack(VA.getLocMemOffset()); - } + int FI = MFI->CreateFixedObject(ArgSize, ArgOffset + BEAlign, true); - if (Flags.isByVal()) { - SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i64); - SDValue Cpy = DAG.getMemcpy(Chain, dl, DstAddr, Arg, SizeNode, - Flags.getByValAlign(), - /*isVolatile = */ false, - /*alwaysInline = */ false, - DstInfo, MachinePointerInfo(0)); - MemOpChains.push_back(Cpy); - } else { - // Normal stack argument, put it where it's needed. - SDValue Store = DAG.getStore(Chain, dl, Arg, DstAddr, DstInfo, - false, false, 0); - MemOpChains.push_back(Store); - } - } + // Create load nodes to retrieve arguments from the stack. + SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); + SDValue ArgValue; - // The loads and stores generated above shouldn't clash with each - // other. Combining them with this TokenFactor notes that fact for the rest of - // the backend. - if (!MemOpChains.empty()) - Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, - &MemOpChains[0], MemOpChains.size()); + ISD::LoadExtType ExtType = ISD::NON_EXTLOAD; + switch (VA.getLocInfo()) { + default: + break; + case CCValAssign::SExt: + ExtType = ISD::SEXTLOAD; + break; + case CCValAssign::ZExt: + ExtType = ISD::ZEXTLOAD; + break; + case CCValAssign::AExt: + ExtType = ISD::EXTLOAD; + break; + } - // Most of the rest of the instructions need to be glued together; we don't - // want assignments to actual registers used by a call to be rearranged by a - // well-meaning scheduler. 
- SDValue InFlag; + ArgValue = DAG.getExtLoad(ExtType, DL, VA.getValVT(), Chain, FIN, + MachinePointerInfo::getFixedStack(FI), + VA.getLocVT(), + false, false, false, 0); - for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { - Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, - RegsToPass[i].second, InFlag); - InFlag = Chain.getValue(1); + InVals.push_back(ArgValue); + } } - // The linker is responsible for inserting veneers when necessary to put a - // function call destination in range, so we don't need to bother with a - // wrapper here. - if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { - const GlobalValue *GV = G->getGlobal(); - Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy()); - } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { - const char *Sym = S->getSymbol(); - Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy()); - } + // varargs + if (isVarArg) { + if (!Subtarget->isTargetDarwin()) { + // The AAPCS variadic function ABI is identical to the non-variadic + // one. As a result there may be more arguments in registers and we should + // save them for future reference. + saveVarArgRegisters(CCInfo, DAG, DL, Chain); + } - // We don't usually want to end the call-sequence here because we would tidy - // the frame up *after* the call, however in the ABI-changing tail-call case - // we've carefully laid out the parameters so that when sp is reset they'll be - // in the correct location. - if (IsTailCall && !IsSibCall) { - Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true), - DAG.getIntPtrConstant(0, true), InFlag, dl); - InFlag = Chain.getValue(1); + AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>(); + // This will point to the next argument passed via stack. + unsigned StackOffset = CCInfo.getNextStackOffset(); + // We currently pass all varargs at 8-byte alignment. + StackOffset = ((StackOffset + 7) & ~7); + AFI->setVarArgsStackIndex(MFI->CreateFixedObject(4, StackOffset, true)); } - // We produce the following DAG scheme for the actual call instruction: - // (AArch64Call Chain, Callee, reg1, ..., regn, preserveMask, inflag? - // - // Most arguments aren't going to be used and just keep the values live as - // far as LLVM is concerned. It's expected to be selected as simply "bl - // callee" (for a direct, non-tail call). - std::vector<SDValue> Ops; - Ops.push_back(Chain); - Ops.push_back(Callee); + AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>(); + unsigned StackArgSize = CCInfo.getNextStackOffset(); + bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt; + if (DoesCalleeRestoreStack(CallConv, TailCallOpt)) { + // This is a non-standard ABI so by fiat I say we're allowed to make full + // use of the stack area to be popped, which must be aligned to 16 bytes in + // any case: + StackArgSize = RoundUpToAlignment(StackArgSize, 16); - if (IsTailCall) { - // Each tail call may have to adjust the stack by a different amount, so - // this information must travel along with the operation for eventual - // consumption by emitEpilogue. - Ops.push_back(DAG.getTargetConstant(FPDiff, MVT::i32)); + // If we're expected to restore the stack (e.g. fastcc) then we'll be adding + // a multiple of 16. + FuncInfo->setArgumentStackToRestore(StackArgSize); + + // This realignment carries over to the available bytes below. Our own + // callers will guarantee the space is free by giving an aligned value to + // CALLSEQ_START. 
} + // Even if we're not expected to free up the space, it's useful to know how + // much is there while considering tail calls (because we can reuse it). + FuncInfo->setBytesInStackArgArea(StackArgSize); - for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) - Ops.push_back(DAG.getRegister(RegsToPass[i].first, - RegsToPass[i].second.getValueType())); + return Chain; +} +void AArch64TargetLowering::saveVarArgRegisters(CCState &CCInfo, + SelectionDAG &DAG, SDLoc DL, + SDValue &Chain) const { + MachineFunction &MF = DAG.getMachineFunction(); + MachineFrameInfo *MFI = MF.getFrameInfo(); + AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>(); - // Add a register mask operand representing the call-preserved registers. This - // is used later in codegen to constrain register-allocation. - const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo(); - const uint32_t *Mask = TRI->getCallPreservedMask(CallConv); - assert(Mask && "Missing call preserved mask for calling convention"); - Ops.push_back(DAG.getRegisterMask(Mask)); + SmallVector<SDValue, 8> MemOps; - // If we needed glue, put it in as the last argument. - if (InFlag.getNode()) - Ops.push_back(InFlag); + static const MCPhysReg GPRArgRegs[] = { AArch64::X0, AArch64::X1, AArch64::X2, + AArch64::X3, AArch64::X4, AArch64::X5, + AArch64::X6, AArch64::X7 }; + static const unsigned NumGPRArgRegs = array_lengthof(GPRArgRegs); + unsigned FirstVariadicGPR = + CCInfo.getFirstUnallocated(GPRArgRegs, NumGPRArgRegs); - SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); + unsigned GPRSaveSize = 8 * (NumGPRArgRegs - FirstVariadicGPR); + int GPRIdx = 0; + if (GPRSaveSize != 0) { + GPRIdx = MFI->CreateStackObject(GPRSaveSize, 8, false); - if (IsTailCall) { - return DAG.getNode(AArch64ISD::TC_RETURN, dl, NodeTys, &Ops[0], Ops.size()); + SDValue FIN = DAG.getFrameIndex(GPRIdx, getPointerTy()); + + for (unsigned i = FirstVariadicGPR; i < NumGPRArgRegs; ++i) { + unsigned VReg = MF.addLiveIn(GPRArgRegs[i], &AArch64::GPR64RegClass); + SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64); + SDValue Store = + DAG.getStore(Val.getValue(1), DL, Val, FIN, + MachinePointerInfo::getStack(i * 8), false, false, 0); + MemOps.push_back(Store); + FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), FIN, + DAG.getConstant(8, getPointerTy())); + } } + FuncInfo->setVarArgsGPRIndex(GPRIdx); + FuncInfo->setVarArgsGPRSize(GPRSaveSize); - Chain = DAG.getNode(AArch64ISD::Call, dl, NodeTys, &Ops[0], Ops.size()); - InFlag = Chain.getValue(1); + if (Subtarget->hasFPARMv8()) { + static const MCPhysReg FPRArgRegs[] = { + AArch64::Q0, AArch64::Q1, AArch64::Q2, AArch64::Q3, + AArch64::Q4, AArch64::Q5, AArch64::Q6, AArch64::Q7}; + static const unsigned NumFPRArgRegs = array_lengthof(FPRArgRegs); + unsigned FirstVariadicFPR = + CCInfo.getFirstUnallocated(FPRArgRegs, NumFPRArgRegs); + + unsigned FPRSaveSize = 16 * (NumFPRArgRegs - FirstVariadicFPR); + int FPRIdx = 0; + if (FPRSaveSize != 0) { + FPRIdx = MFI->CreateStackObject(FPRSaveSize, 16, false); - // Now we can reclaim the stack, just as well do it before working out where - // our return value is. - if (!IsSibCall) { - uint64_t CalleePopBytes - = DoesCalleeRestoreStack(CallConv, TailCallOpt) ? 
NumBytes : 0; + SDValue FIN = DAG.getFrameIndex(FPRIdx, getPointerTy()); - Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true), - DAG.getIntPtrConstant(CalleePopBytes, true), - InFlag, dl); - InFlag = Chain.getValue(1); + for (unsigned i = FirstVariadicFPR; i < NumFPRArgRegs; ++i) { + unsigned VReg = MF.addLiveIn(FPRArgRegs[i], &AArch64::FPR128RegClass); + SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f128); + + SDValue Store = + DAG.getStore(Val.getValue(1), DL, Val, FIN, + MachinePointerInfo::getStack(i * 16), false, false, 0); + MemOps.push_back(Store); + FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), FIN, + DAG.getConstant(16, getPointerTy())); + } + } + FuncInfo->setVarArgsFPRIndex(FPRIdx); + FuncInfo->setVarArgsFPRSize(FPRSaveSize); } - return LowerCallResult(Chain, InFlag, CallConv, - IsVarArg, Ins, dl, DAG, InVals); + if (!MemOps.empty()) { + Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps); + } } -SDValue -AArch64TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, - CallingConv::ID CallConv, bool IsVarArg, - const SmallVectorImpl<ISD::InputArg> &Ins, - SDLoc dl, SelectionDAG &DAG, - SmallVectorImpl<SDValue> &InVals) const { +/// LowerCallResult - Lower the result values of a call into the +/// appropriate copies out of appropriate physical registers. +SDValue AArch64TargetLowering::LowerCallResult( + SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg, + const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL, SelectionDAG &DAG, + SmallVectorImpl<SDValue> &InVals, bool isThisReturn, + SDValue ThisVal) const { + CCAssignFn *RetCC = CallConv == CallingConv::WebKit_JS + ? RetCC_AArch64_WebKit_JS + : RetCC_AArch64_AAPCS; // Assign locations to each value returned by this call. SmallVector<CCValAssign, 16> RVLocs; - CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), + CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), getTargetMachine(), RVLocs, *DAG.getContext()); - CCInfo.AnalyzeCallResult(Ins, CCAssignFnForNode(CallConv)); + CCInfo.AnalyzeCallResult(Ins, RetCC); + // Copy all of the result registers out of their specified physreg. for (unsigned i = 0; i != RVLocs.size(); ++i) { CCValAssign VA = RVLocs[i]; - // Return values that are too big to fit into registers should use an sret - // pointer, so this can be a lot simpler than the main argument code. - assert(VA.isRegLoc() && "Memory locations not expected for call return"); + // Pass 'this' value directly from the argument to return value, to avoid + // reg unit interference + if (i == 0 && isThisReturn) { + assert(!VA.needsCustom() && VA.getLocVT() == MVT::i64 && + "unexpected return calling convention register assignment"); + InVals.push_back(ThisVal); + continue; + } - SDValue Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(), - InFlag); + SDValue Val = + DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), InFlag); Chain = Val.getValue(1); InFlag = Val.getValue(2); switch (VA.getLocInfo()) { - default: llvm_unreachable("Unknown loc info!"); - case CCValAssign::Full: break; - case CCValAssign::BCvt: - Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val); + default: + llvm_unreachable("Unknown loc info!"); + case CCValAssign::Full: break; - case CCValAssign::ZExt: - case CCValAssign::SExt: - case CCValAssign::AExt: - // Floating-point arguments only get extended/truncated if they're going - // in memory, so using the integer operation is acceptable here. 
- Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); + case CCValAssign::BCvt: + Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val); break; } @@ -1812,17 +1960,12 @@ AArch64TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, return Chain; } -bool -AArch64TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, - CallingConv::ID CalleeCC, - bool IsVarArg, - bool IsCalleeStructRet, - bool IsCallerStructRet, - const SmallVectorImpl<ISD::OutputArg> &Outs, - const SmallVectorImpl<SDValue> &OutVals, - const SmallVectorImpl<ISD::InputArg> &Ins, - SelectionDAG& DAG) const { - +bool AArch64TargetLowering::isEligibleForTailCallOptimization( + SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg, + bool isCalleeStructRet, bool isCallerStructRet, + const SmallVectorImpl<ISD::OutputArg> &Outs, + const SmallVectorImpl<SDValue> &OutVals, + const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const { // For CallingConv::C this function knows whether the ABI needs // changing. That's not true for other conventions so they will have to opt in // manually. @@ -1838,7 +1981,8 @@ AArch64TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, // we want to reuse during a tail call. Working around this *is* possible (see // X86) but less efficient and uglier in LowerCall. for (Function::const_arg_iterator i = CallerF->arg_begin(), - e = CallerF->arg_end(); i != e; ++i) + e = CallerF->arg_end(); + i != e; ++i) if (i->hasByValAttr()) return false; @@ -1854,10 +1998,10 @@ AArch64TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, // I want anyone implementing a new calling convention to think long and hard // about this assert. - assert((!IsVarArg || CalleeCC == CallingConv::C) - && "Unexpected variadic calling convention"); + assert((!isVarArg || CalleeCC == CallingConv::C) && + "Unexpected variadic calling convention"); - if (IsVarArg && !Outs.empty()) { + if (isVarArg && !Outs.empty()) { // At least two cases here: if caller is fastcc then we can't have any // memory arguments (we'd be expected to clean up the stack afterwards). If // caller is C then we could potentially use its argument area. @@ -1865,10 +2009,10 @@ AArch64TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, // FIXME: for now we take the most conservative of these in both cases: // disallow all variadic memory operands. 
SmallVector<CCValAssign, 16> ArgLocs; - CCState CCInfo(CalleeCC, IsVarArg, DAG.getMachineFunction(), + CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), getTargetMachine(), ArgLocs, *DAG.getContext()); - CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForNode(CalleeCC)); + CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, true)); for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) if (!ArgLocs[i].isRegLoc()) return false; @@ -1880,12 +2024,12 @@ AArch64TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, SmallVector<CCValAssign, 16> RVLocs1; CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), getTargetMachine(), RVLocs1, *DAG.getContext()); - CCInfo1.AnalyzeCallResult(Ins, CCAssignFnForNode(CalleeCC)); + CCInfo1.AnalyzeCallResult(Ins, CCAssignFnForCall(CalleeCC, isVarArg)); SmallVector<CCValAssign, 16> RVLocs2; CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), getTargetMachine(), RVLocs2, *DAG.getContext()); - CCInfo2.AnalyzeCallResult(Ins, CCAssignFnForNode(CallerCC)); + CCInfo2.AnalyzeCallResult(Ins, CCAssignFnForCall(CallerCC, isVarArg)); if (RVLocs1.size() != RVLocs2.size()) return false; @@ -1909,28 +2053,18 @@ AArch64TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, return true; SmallVector<CCValAssign, 16> ArgLocs; - CCState CCInfo(CalleeCC, IsVarArg, DAG.getMachineFunction(), + CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), getTargetMachine(), ArgLocs, *DAG.getContext()); - CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForNode(CalleeCC)); + CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, isVarArg)); - const AArch64MachineFunctionInfo *FuncInfo - = MF.getInfo<AArch64MachineFunctionInfo>(); + const AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>(); // If the stack arguments for this call would fit into our own save area then // the call can be made tail. return CCInfo.getNextStackOffset() <= FuncInfo->getBytesInStackArgArea(); } -bool AArch64TargetLowering::DoesCalleeRestoreStack(CallingConv::ID CallCC, - bool TailCallOpt) const { - return CallCC == CallingConv::Fast && TailCallOpt; -} - -bool AArch64TargetLowering::IsTailCallConvention(CallingConv::ID CallCC) const { - return CallCC == CallingConv::Fast; -} - SDValue AArch64TargetLowering::addTokenForArgument(SDValue Chain, SelectionDAG &DAG, MachineFrameInfo *MFI, @@ -1946,7 +2080,8 @@ SDValue AArch64TargetLowering::addTokenForArgument(SDValue Chain, // Add a chain value for each stack argument corresponding for (SDNode::use_iterator U = DAG.getEntryNode().getNode()->use_begin(), - UE = DAG.getEntryNode().getNode()->use_end(); U != UE; ++U) + UE = DAG.getEntryNode().getNode()->use_end(); + U != UE; ++U) if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U)) if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr())) if (FI->getIndex() < 0) { @@ -1959,625 +2094,609 @@ SDValue AArch64TargetLowering::addTokenForArgument(SDValue Chain, ArgChains.push_back(SDValue(L, 1)); } - // Build a tokenfactor for all the chains. - return DAG.getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, - &ArgChains[0], ArgChains.size()); + // Build a tokenfactor for all the chains. 
+ return DAG.getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains); } -static A64CC::CondCodes IntCCToA64CC(ISD::CondCode CC) { - switch (CC) { - case ISD::SETEQ: return A64CC::EQ; - case ISD::SETGT: return A64CC::GT; - case ISD::SETGE: return A64CC::GE; - case ISD::SETLT: return A64CC::LT; - case ISD::SETLE: return A64CC::LE; - case ISD::SETNE: return A64CC::NE; - case ISD::SETUGT: return A64CC::HI; - case ISD::SETUGE: return A64CC::HS; - case ISD::SETULT: return A64CC::LO; - case ISD::SETULE: return A64CC::LS; - default: llvm_unreachable("Unexpected condition code"); - } -} - -bool AArch64TargetLowering::isLegalICmpImmediate(int64_t Val) const { - // icmp is implemented using adds/subs immediate, which take an unsigned - // 12-bit immediate, optionally shifted left by 12 bits. - - // Symmetric by using adds/subs - if (Val < 0) - Val = -Val; - - return (Val & ~0xfff) == 0 || (Val & ~0xfff000) == 0; -} - -SDValue AArch64TargetLowering::getSelectableIntSetCC(SDValue LHS, SDValue RHS, - ISD::CondCode CC, SDValue &A64cc, - SelectionDAG &DAG, SDLoc &dl) const { - if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) { - int64_t C = 0; - EVT VT = RHSC->getValueType(0); - bool knownInvalid = false; - - // I'm not convinced the rest of LLVM handles these edge cases properly, but - // we can at least get it right. - if (isSignedIntSetCC(CC)) { - C = RHSC->getSExtValue(); - } else if (RHSC->getZExtValue() > INT64_MAX) { - // A 64-bit constant not representable by a signed 64-bit integer is far - // too big to fit into a SUBS immediate anyway. - knownInvalid = true; - } else { - C = RHSC->getZExtValue(); - } - - if (!knownInvalid && !isLegalICmpImmediate(C)) { - // Constant does not fit, try adjusting it by one? - switch (CC) { - default: break; - case ISD::SETLT: - case ISD::SETGE: - if (isLegalICmpImmediate(C-1)) { - CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT; - RHS = DAG.getConstant(C-1, VT); - } - break; - case ISD::SETULT: - case ISD::SETUGE: - if (isLegalICmpImmediate(C-1)) { - CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT; - RHS = DAG.getConstant(C-1, VT); - } - break; - case ISD::SETLE: - case ISD::SETGT: - if (isLegalICmpImmediate(C+1)) { - CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE; - RHS = DAG.getConstant(C+1, VT); - } - break; - case ISD::SETULE: - case ISD::SETUGT: - if (isLegalICmpImmediate(C+1)) { - CC = (CC == ISD::SETULE) ? 
ISD::SETULT : ISD::SETUGE; - RHS = DAG.getConstant(C+1, VT); - } - break; - } - } - } - - A64CC::CondCodes CondCode = IntCCToA64CC(CC); - A64cc = DAG.getConstant(CondCode, MVT::i32); - return DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS, - DAG.getCondCode(CC)); +bool AArch64TargetLowering::DoesCalleeRestoreStack(CallingConv::ID CallCC, + bool TailCallOpt) const { + return CallCC == CallingConv::Fast && TailCallOpt; } -static A64CC::CondCodes FPCCToA64CC(ISD::CondCode CC, - A64CC::CondCodes &Alternative) { - A64CC::CondCodes CondCode = A64CC::Invalid; - Alternative = A64CC::Invalid; - - switch (CC) { - default: llvm_unreachable("Unknown FP condition!"); - case ISD::SETEQ: - case ISD::SETOEQ: CondCode = A64CC::EQ; break; - case ISD::SETGT: - case ISD::SETOGT: CondCode = A64CC::GT; break; - case ISD::SETGE: - case ISD::SETOGE: CondCode = A64CC::GE; break; - case ISD::SETOLT: CondCode = A64CC::MI; break; - case ISD::SETOLE: CondCode = A64CC::LS; break; - case ISD::SETONE: CondCode = A64CC::MI; Alternative = A64CC::GT; break; - case ISD::SETO: CondCode = A64CC::VC; break; - case ISD::SETUO: CondCode = A64CC::VS; break; - case ISD::SETUEQ: CondCode = A64CC::EQ; Alternative = A64CC::VS; break; - case ISD::SETUGT: CondCode = A64CC::HI; break; - case ISD::SETUGE: CondCode = A64CC::PL; break; - case ISD::SETLT: - case ISD::SETULT: CondCode = A64CC::LT; break; - case ISD::SETLE: - case ISD::SETULE: CondCode = A64CC::LE; break; - case ISD::SETNE: - case ISD::SETUNE: CondCode = A64CC::NE; break; - } - return CondCode; +bool AArch64TargetLowering::IsTailCallConvention(CallingConv::ID CallCC) const { + return CallCC == CallingConv::Fast; } +/// LowerCall - Lower a call to a callseq_start + CALL + callseq_end chain, +/// and add input and output parameter nodes. SDValue -AArch64TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const { - SDLoc DL(Op); - EVT PtrVT = getPointerTy(); - const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); - - switch(getTargetMachine().getCodeModel()) { - case CodeModel::Small: - // The most efficient code is PC-relative anyway for the small memory model, - // so we don't need to worry about relocation model. 
- return DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT, - DAG.getTargetBlockAddress(BA, PtrVT, 0, - AArch64II::MO_NO_FLAG), - DAG.getTargetBlockAddress(BA, PtrVT, 0, - AArch64II::MO_LO12), - DAG.getConstant(/*Alignment=*/ 4, MVT::i32)); - case CodeModel::Large: - return DAG.getNode( - AArch64ISD::WrapperLarge, DL, PtrVT, - DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G3), - DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G2_NC), - DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G1_NC), - DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G0_NC)); - default: - llvm_unreachable("Only small and large code models supported now"); - } -} - +AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI, + SmallVectorImpl<SDValue> &InVals) const { + SelectionDAG &DAG = CLI.DAG; + SDLoc &DL = CLI.DL; + SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs; + SmallVector<SDValue, 32> &OutVals = CLI.OutVals; + SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins; + SDValue Chain = CLI.Chain; + SDValue Callee = CLI.Callee; + bool &IsTailCall = CLI.IsTailCall; + CallingConv::ID CallConv = CLI.CallConv; + bool IsVarArg = CLI.IsVarArg; -// (BRCOND chain, val, dest) -SDValue -AArch64TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const { - SDLoc dl(Op); - SDValue Chain = Op.getOperand(0); - SDValue TheBit = Op.getOperand(1); - SDValue DestBB = Op.getOperand(2); + MachineFunction &MF = DAG.getMachineFunction(); + bool IsStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet(); + bool IsThisReturn = false; - // AArch64 BooleanContents is the default UndefinedBooleanContent, which means - // that as the consumer we are responsible for ignoring rubbish in higher - // bits. - TheBit = DAG.getNode(ISD::AND, dl, MVT::i32, TheBit, - DAG.getConstant(1, MVT::i32)); + AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>(); + bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt; + bool IsSibCall = false; - SDValue A64CMP = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, TheBit, - DAG.getConstant(0, TheBit.getValueType()), - DAG.getCondCode(ISD::SETNE)); + if (IsTailCall) { + // Check if it's really possible to do a tail call. + IsTailCall = isEligibleForTailCallOptimization( + Callee, CallConv, IsVarArg, IsStructRet, + MF.getFunction()->hasStructRetAttr(), Outs, OutVals, Ins, DAG); + if (!IsTailCall && CLI.CS && CLI.CS->isMustTailCall()) + report_fatal_error("failed to perform tail call elimination on a call " + "site marked musttail"); - return DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other, Chain, - A64CMP, DAG.getConstant(A64CC::NE, MVT::i32), - DestBB); -} + // A sibling call is one where we're under the usual C ABI and not planning + // to change that but can still do a tail call: + if (!TailCallOpt && IsTailCall) + IsSibCall = true; -// (BR_CC chain, condcode, lhs, rhs, dest) -SDValue -AArch64TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const { - SDLoc dl(Op); - SDValue Chain = Op.getOperand(0); - ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); - SDValue LHS = Op.getOperand(2); - SDValue RHS = Op.getOperand(3); - SDValue DestBB = Op.getOperand(4); + if (IsTailCall) + ++NumTailCalls; + } - if (LHS.getValueType() == MVT::f128) { - // f128 comparisons are lowered to runtime calls by a routine which sets - // LHS, RHS and CC appropriately for the rest of this function to continue. - softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl); + // Analyze operands of the call, assigning locations to each operand. 
+ SmallVector<CCValAssign, 16> ArgLocs; + CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), + getTargetMachine(), ArgLocs, *DAG.getContext()); - // If softenSetCCOperands returned a scalar, we need to compare the result - // against zero to select between true and false values. - if (RHS.getNode() == 0) { - RHS = DAG.getConstant(0, LHS.getValueType()); - CC = ISD::SETNE; + if (IsVarArg) { + // Handle fixed and variable vector arguments differently. + // Variable vector arguments always go into memory. + unsigned NumArgs = Outs.size(); + + for (unsigned i = 0; i != NumArgs; ++i) { + MVT ArgVT = Outs[i].VT; + ISD::ArgFlagsTy ArgFlags = Outs[i].Flags; + CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, + /*IsVarArg=*/ !Outs[i].IsFixed); + bool Res = AssignFn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, CCInfo); + assert(!Res && "Call operand has unhandled type"); + (void)Res; + } + } else { + // At this point, Outs[].VT may already be promoted to i32. To correctly + // handle passing i8 as i8 instead of i32 on stack, we pass in both i32 and + // i8 to CC_AArch64_AAPCS with i32 being ValVT and i8 being LocVT. + // Since AnalyzeCallOperands uses Ins[].VT for both ValVT and LocVT, here + // we use a special version of AnalyzeCallOperands to pass in ValVT and + // LocVT. + unsigned NumArgs = Outs.size(); + for (unsigned i = 0; i != NumArgs; ++i) { + MVT ValVT = Outs[i].VT; + // Get type of the original argument. + EVT ActualVT = getValueType(CLI.getArgs()[Outs[i].OrigArgIndex].Ty, + /*AllowUnknown*/ true); + MVT ActualMVT = ActualVT.isSimple() ? ActualVT.getSimpleVT() : ValVT; + ISD::ArgFlagsTy ArgFlags = Outs[i].Flags; + // If ActualMVT is i1/i8/i16, we should set LocVT to i8/i8/i16. + if (ActualMVT == MVT::i1 || ActualMVT == MVT::i8) + ValVT = MVT::i8; + else if (ActualMVT == MVT::i16) + ValVT = MVT::i16; + + CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, /*IsVarArg=*/false); + bool Res = AssignFn(i, ValVT, ValVT, CCValAssign::Full, ArgFlags, CCInfo); + assert(!Res && "Call operand has unhandled type"); + (void)Res; } } - if (LHS.getValueType().isInteger()) { - SDValue A64cc; - - // Integers are handled in a separate function because the combinations of - // immediates and tests can get hairy and we may want to fiddle things. - SDValue CmpOp = getSelectableIntSetCC(LHS, RHS, CC, A64cc, DAG, dl); + // Get a count of how many bytes are to be pushed on the stack. + unsigned NumBytes = CCInfo.getNextStackOffset(); - return DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other, - Chain, CmpOp, A64cc, DestBB); + if (IsSibCall) { + // Since we're not changing the ABI to make this a tail call, the memory + // operands are already available in the caller's incoming argument space. + NumBytes = 0; } - // Note that some LLVM floating-point CondCodes can't be lowered to a single - // conditional branch, hence FPCCToA64CC can set a second test, where either - // passing is sufficient. - A64CC::CondCodes CondCode, Alternative = A64CC::Invalid; - CondCode = FPCCToA64CC(CC, Alternative); - SDValue A64cc = DAG.getConstant(CondCode, MVT::i32); - SDValue SetCC = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS, - DAG.getCondCode(CC)); - SDValue A64BR_CC = DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other, - Chain, SetCC, A64cc, DestBB); + // FPDiff is the byte offset of the call's argument area from the callee's. + // Stores to callee stack arguments will be placed in FixedStackSlots offset + // by this amount for a tail call. 
In a sibling call it must be 0 because the + // caller will deallocate the entire stack and the callee still expects its + // arguments to begin at SP+0. Completely unused for non-tail calls. + int FPDiff = 0; - if (Alternative != A64CC::Invalid) { - A64cc = DAG.getConstant(Alternative, MVT::i32); - A64BR_CC = DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other, - A64BR_CC, SetCC, A64cc, DestBB); + if (IsTailCall && !IsSibCall) { + unsigned NumReusableBytes = FuncInfo->getBytesInStackArgArea(); - } + // Since callee will pop argument stack as a tail call, we must keep the + // popped size 16-byte aligned. + NumBytes = RoundUpToAlignment(NumBytes, 16); - return A64BR_CC; -} + // FPDiff will be negative if this tail call requires more space than we + // would automatically have in our incoming argument space. Positive if we + // can actually shrink the stack. + FPDiff = NumReusableBytes - NumBytes; -SDValue -AArch64TargetLowering::LowerF128ToCall(SDValue Op, SelectionDAG &DAG, - RTLIB::Libcall Call) const { - ArgListTy Args; - ArgListEntry Entry; - for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) { - EVT ArgVT = Op.getOperand(i).getValueType(); - Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); - Entry.Node = Op.getOperand(i); Entry.Ty = ArgTy; - Entry.isSExt = false; - Entry.isZExt = false; - Args.push_back(Entry); + // The stack pointer must be 16-byte aligned at all times it's used for a + // memory operation, which in practice means at *all* times and in + // particular across call boundaries. Therefore our own arguments started at + // a 16-byte aligned SP and the delta applied for the tail call should + // satisfy the same constraint. + assert(FPDiff % 16 == 0 && "unaligned stack on tail call"); } - SDValue Callee = DAG.getExternalSymbol(getLibcallName(Call), getPointerTy()); - Type *RetTy = Op.getValueType().getTypeForEVT(*DAG.getContext()); + // Adjust the stack pointer for the new arguments... + // These operations are automatically eliminated by the prolog/epilog pass + if (!IsSibCall) + Chain = + DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true), DL); - // By default, the input chain to this libcall is the entry node of the - // function. If the libcall is going to be emitted as a tail call then - // isUsedByReturnOnly will change it to the right chain if the return - // node which is being folded has a non-entry input chain. - SDValue InChain = DAG.getEntryNode(); + SDValue StackPtr = DAG.getCopyFromReg(Chain, DL, AArch64::SP, getPointerTy()); - // isTailCall may be true since the callee does not reference caller stack - // frame. Check if it's in the right position. - SDValue TCChain = InChain; - bool isTailCall = isInTailCallPosition(DAG, Op.getNode(), TCChain); - if (isTailCall) - InChain = TCChain; + SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; + SmallVector<SDValue, 8> MemOpChains; - TargetLowering:: - CallLoweringInfo CLI(InChain, RetTy, false, false, false, false, - 0, getLibcallCallingConv(Call), isTailCall, - /*doesNotReturn=*/false, /*isReturnValueUsed=*/true, - Callee, Args, DAG, SDLoc(Op)); - std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI); + // Walk the register/memloc assignments, inserting copies/loads. + for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); i != e; + ++i, ++realArgIdx) { + CCValAssign &VA = ArgLocs[i]; + SDValue Arg = OutVals[realArgIdx]; + ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags; - if (!CallInfo.second.getNode()) - // It's a tailcall, return the chain (which is the DAG root). 
- return DAG.getRoot(); + // Promote the value if needed. + switch (VA.getLocInfo()) { + default: + llvm_unreachable("Unknown loc info!"); + case CCValAssign::Full: + break; + case CCValAssign::SExt: + Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg); + break; + case CCValAssign::ZExt: + Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg); + break; + case CCValAssign::AExt: + if (Outs[realArgIdx].ArgVT == MVT::i1) { + // AAPCS requires i1 to be zero-extended to 8-bits by the caller. + Arg = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Arg); + Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i8, Arg); + } + Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg); + break; + case CCValAssign::BCvt: + Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg); + break; + case CCValAssign::FPExt: + Arg = DAG.getNode(ISD::FP_EXTEND, DL, VA.getLocVT(), Arg); + break; + } - return CallInfo.first; -} + if (VA.isRegLoc()) { + if (realArgIdx == 0 && Flags.isReturned() && Outs[0].VT == MVT::i64) { + assert(VA.getLocVT() == MVT::i64 && + "unexpected calling convention register assignment"); + assert(!Ins.empty() && Ins[0].VT == MVT::i64 && + "unexpected use of 'returned'"); + IsThisReturn = true; + } + RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); + } else { + assert(VA.isMemLoc()); -SDValue -AArch64TargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const { - if (Op.getOperand(0).getValueType() != MVT::f128) { - // It's legal except when f128 is involved - return Op; - } + SDValue DstAddr; + MachinePointerInfo DstInfo; - RTLIB::Libcall LC; - LC = RTLIB::getFPROUND(Op.getOperand(0).getValueType(), Op.getValueType()); + // FIXME: This works on big-endian for composite byvals, which are the + // common case. It should also work for fundamental types too. + uint32_t BEAlign = 0; + unsigned OpSize = Flags.isByVal() ? Flags.getByValSize() * 8 + : VA.getLocVT().getSizeInBits(); + OpSize = (OpSize + 7) / 8; + if (!Subtarget->isLittleEndian() && !Flags.isByVal()) { + if (OpSize < 8) + BEAlign = 8 - OpSize; + } + unsigned LocMemOffset = VA.getLocMemOffset(); + int32_t Offset = LocMemOffset + BEAlign; + SDValue PtrOff = DAG.getIntPtrConstant(Offset); + PtrOff = DAG.getNode(ISD::ADD, DL, getPointerTy(), StackPtr, PtrOff); + + if (IsTailCall) { + Offset = Offset + FPDiff; + int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true); + + DstAddr = DAG.getFrameIndex(FI, getPointerTy()); + DstInfo = MachinePointerInfo::getFixedStack(FI); + + // Make sure any stack arguments overlapping with where we're storing + // are loaded before this eventual operation. Otherwise they'll be + // clobbered. 
+ Chain = addTokenForArgument(Chain, DAG, MF.getFrameInfo(), FI); + } else { + SDValue PtrOff = DAG.getIntPtrConstant(Offset); - SDValue SrcVal = Op.getOperand(0); - return makeLibCall(DAG, LC, Op.getValueType(), &SrcVal, 1, - /*isSigned*/ false, SDLoc(Op)).first; -} + DstAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), StackPtr, PtrOff); + DstInfo = MachinePointerInfo::getStack(LocMemOffset); + } -SDValue -AArch64TargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const { - assert(Op.getValueType() == MVT::f128 && "Unexpected lowering"); + if (Outs[i].Flags.isByVal()) { + SDValue SizeNode = + DAG.getConstant(Outs[i].Flags.getByValSize(), MVT::i64); + SDValue Cpy = DAG.getMemcpy( + Chain, DL, DstAddr, Arg, SizeNode, Outs[i].Flags.getByValAlign(), + /*isVolatile = */ false, + /*alwaysInline = */ false, DstInfo, MachinePointerInfo()); - RTLIB::Libcall LC; - LC = RTLIB::getFPEXT(Op.getOperand(0).getValueType(), Op.getValueType()); + MemOpChains.push_back(Cpy); + } else { + // Since we pass i1/i8/i16 as i1/i8/i16 on stack and Arg is already + // promoted to a legal register type i32, we should truncate Arg back to + // i1/i8/i16. + if (Arg.getValueType().isSimple() && + Arg.getValueType().getSimpleVT() == MVT::i32 && + (VA.getLocVT() == MVT::i1 || VA.getLocVT() == MVT::i8 || + VA.getLocVT() == MVT::i16)) + Arg = DAG.getNode(ISD::TRUNCATE, DL, VA.getLocVT(), Arg); + + SDValue Store = + DAG.getStore(Chain, DL, Arg, DstAddr, DstInfo, false, false, 0); + MemOpChains.push_back(Store); + } + } + } - return LowerF128ToCall(Op, DAG, LC); -} + if (!MemOpChains.empty()) + Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains); -static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG, - bool IsSigned) { - SDLoc dl(Op); - EVT VT = Op.getValueType(); - SDValue Vec = Op.getOperand(0); - EVT OpVT = Vec.getValueType(); - unsigned Opc = IsSigned ? ISD::FP_TO_SINT : ISD::FP_TO_UINT; - - if (VT.getVectorNumElements() == 1) { - assert(OpVT == MVT::v1f64 && "Unexpected vector type!"); - if (VT.getSizeInBits() == OpVT.getSizeInBits()) - return Op; - return DAG.UnrollVectorOp(Op.getNode()); - } - - if (VT.getSizeInBits() > OpVT.getSizeInBits()) { - assert(Vec.getValueType() == MVT::v2f32 && VT == MVT::v2i64 && - "Unexpected vector type!"); - Vec = DAG.getNode(ISD::FP_EXTEND, dl, MVT::v2f64, Vec); - return DAG.getNode(Opc, dl, VT, Vec); - } else if (VT.getSizeInBits() < OpVT.getSizeInBits()) { - EVT CastVT = EVT::getIntegerVT(*DAG.getContext(), - OpVT.getVectorElementType().getSizeInBits()); - CastVT = - EVT::getVectorVT(*DAG.getContext(), CastVT, VT.getVectorNumElements()); - Vec = DAG.getNode(Opc, dl, CastVT, Vec); - return DAG.getNode(ISD::TRUNCATE, dl, VT, Vec); - } - return DAG.getNode(Opc, dl, VT, Vec); -} - -static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) { - // We custom lower concat_vectors with 4, 8, or 16 operands that are all the - // same operand and of type v1* using the DUP instruction. - unsigned NumOps = Op->getNumOperands(); - if (NumOps == 2) { - assert(Op.getValueType().getSizeInBits() == 128 && "unexpected concat"); - return Op; + // Build a sequence of copy-to-reg nodes chained together with token chain + // and flag operands which copy the outgoing args into the appropriate regs. 
+ SDValue InFlag; + for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { + Chain = DAG.getCopyToReg(Chain, DL, RegsToPass[i].first, + RegsToPass[i].second, InFlag); + InFlag = Chain.getValue(1); } - if (NumOps != 4 && NumOps != 8 && NumOps != 16) - return SDValue(); - - // Must be a single value for VDUP. - SDValue Op0 = Op.getOperand(0); - for (unsigned i = 1; i < NumOps; ++i) { - SDValue OpN = Op.getOperand(i); - if (Op0 != OpN) - return SDValue(); + // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every + // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol + // node so that legalize doesn't hack it. + if (getTargetMachine().getCodeModel() == CodeModel::Large && + Subtarget->isTargetMachO()) { + if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { + const GlobalValue *GV = G->getGlobal(); + bool InternalLinkage = GV->hasInternalLinkage(); + if (InternalLinkage) + Callee = DAG.getTargetGlobalAddress(GV, DL, getPointerTy(), 0, 0); + else { + Callee = DAG.getTargetGlobalAddress(GV, DL, getPointerTy(), 0, + AArch64II::MO_GOT); + Callee = DAG.getNode(AArch64ISD::LOADgot, DL, getPointerTy(), Callee); + } + } else if (ExternalSymbolSDNode *S = + dyn_cast<ExternalSymbolSDNode>(Callee)) { + const char *Sym = S->getSymbol(); + Callee = + DAG.getTargetExternalSymbol(Sym, getPointerTy(), AArch64II::MO_GOT); + Callee = DAG.getNode(AArch64ISD::LOADgot, DL, getPointerTy(), Callee); + } + } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { + const GlobalValue *GV = G->getGlobal(); + Callee = DAG.getTargetGlobalAddress(GV, DL, getPointerTy(), 0, 0); + } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { + const char *Sym = S->getSymbol(); + Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy(), 0); } - // Verify the value type. - EVT EltVT = Op0.getValueType(); - switch (NumOps) { - default: llvm_unreachable("Unexpected number of operands"); - case 4: - if (EltVT != MVT::v1i16 && EltVT != MVT::v1i32) - return SDValue(); - break; - case 8: - if (EltVT != MVT::v1i8 && EltVT != MVT::v1i16) - return SDValue(); - break; - case 16: - if (EltVT != MVT::v1i8) - return SDValue(); - break; + // We don't usually want to end the call-sequence here because we would tidy + // the frame up *after* the call, however in the ABI-changing tail-call case + // we've carefully laid out the parameters so that when sp is reset they'll be + // in the correct location. + if (IsTailCall && !IsSibCall) { + Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true), + DAG.getIntPtrConstant(0, true), InFlag, DL); + InFlag = Chain.getValue(1); } - SDLoc DL(Op); - EVT VT = Op.getValueType(); - // VDUP produces better code for constants. 
-  if (Op0->getOpcode() == ISD::BUILD_VECTOR)
-    return DAG.getNode(AArch64ISD::NEON_VDUP, DL, VT, Op0->getOperand(0));
-  return DAG.getNode(AArch64ISD::NEON_VDUPLANE, DL, VT, Op0,
-                     DAG.getConstant(0, MVT::i64));
-}
+  std::vector<SDValue> Ops;
+  Ops.push_back(Chain);
+  Ops.push_back(Callee);
-SDValue
-AArch64TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
-                                      bool IsSigned) const {
-  if (Op.getValueType().isVector())
-    return LowerVectorFP_TO_INT(Op, DAG, IsSigned);
-  if (Op.getOperand(0).getValueType() != MVT::f128) {
-    // It's legal except when f128 is involved
-    return Op;
+  if (IsTailCall) {
+    // Each tail call may have to adjust the stack by a different amount, so
+    // this information must travel along with the operation for eventual
+    // consumption by emitEpilogue.
+    Ops.push_back(DAG.getTargetConstant(FPDiff, MVT::i32));
   }
-  RTLIB::Libcall LC;
-  if (IsSigned)
-    LC = RTLIB::getFPTOSINT(Op.getOperand(0).getValueType(), Op.getValueType());
-  else
-    LC = RTLIB::getFPTOUINT(Op.getOperand(0).getValueType(), Op.getValueType());
+  // Add argument registers to the end of the list so that they are known live
+  // into the call.
+  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
+    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
+                                  RegsToPass[i].second.getValueType()));
-  return LowerF128ToCall(Op, DAG, LC);
-}
+  // Add a register mask operand representing the call-preserved registers.
+  const uint32_t *Mask;
+  const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
+  const AArch64RegisterInfo *ARI =
+      static_cast<const AArch64RegisterInfo *>(TRI);
+  if (IsThisReturn) {
+    // For 'this' returns, use the X0-preserving mask if applicable
+    Mask = ARI->getThisReturnPreservedMask(CallConv);
+    if (!Mask) {
+      IsThisReturn = false;
+      Mask = ARI->getCallPreservedMask(CallConv);
+    }
+  } else
+    Mask = ARI->getCallPreservedMask(CallConv);
-SDValue AArch64TargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{
-  MachineFunction &MF = DAG.getMachineFunction();
-  MachineFrameInfo *MFI = MF.getFrameInfo();
-  MFI->setReturnAddressIsTaken(true);
+  assert(Mask && "Missing call preserved mask for calling convention");
+  Ops.push_back(DAG.getRegisterMask(Mask));
-  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
-    return SDValue();
+  if (InFlag.getNode())
+    Ops.push_back(InFlag);
-  EVT VT = Op.getValueType();
-  SDLoc dl(Op);
-  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
-  if (Depth) {
-    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
-    SDValue Offset = DAG.getConstant(8, MVT::i64);
-    return DAG.getLoad(VT, dl, DAG.getEntryNode(),
-                       DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
-                       MachinePointerInfo(), false, false, false, 0);
-  }
+  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
-  // Return X30, which contains the return address. Mark it an implicit live-in.
-  unsigned Reg = MF.addLiveIn(AArch64::X30, getRegClassFor(MVT::i64));
-  return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, MVT::i64);
-}
+  // If we're doing a tail call, use a TC_RETURN here rather than an
+  // actual call instruction.
+  if (IsTailCall)
+    return DAG.getNode(AArch64ISD::TC_RETURN, DL, NodeTys, Ops);
+  // Returns a chain and a flag for retval copy to use.
+ Chain = DAG.getNode(AArch64ISD::CALL, DL, NodeTys, Ops); + InFlag = Chain.getValue(1); -SDValue AArch64TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) - const { - MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); - MFI->setFrameAddressIsTaken(true); + uint64_t CalleePopBytes = DoesCalleeRestoreStack(CallConv, TailCallOpt) + ? RoundUpToAlignment(NumBytes, 16) + : 0; - EVT VT = Op.getValueType(); - SDLoc dl(Op); - unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); - unsigned FrameReg = AArch64::X29; - SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT); - while (Depth--) - FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr, - MachinePointerInfo(), - false, false, false, 0); - return FrameAddr; + Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true), + DAG.getIntPtrConstant(CalleePopBytes, true), + InFlag, DL); + if (!Ins.empty()) + InFlag = Chain.getValue(1); + + // Handle result values, copying them out of physregs into vregs that we + // return. + return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL, DAG, + InVals, IsThisReturn, + IsThisReturn ? OutVals[0] : SDValue()); +} + +bool AArch64TargetLowering::CanLowerReturn( + CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg, + const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const { + CCAssignFn *RetCC = CallConv == CallingConv::WebKit_JS + ? RetCC_AArch64_WebKit_JS + : RetCC_AArch64_AAPCS; + SmallVector<CCValAssign, 16> RVLocs; + CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), RVLocs, Context); + return CCInfo.CheckReturn(Outs, RetCC); } SDValue -AArch64TargetLowering::LowerGlobalAddressELFLarge(SDValue Op, - SelectionDAG &DAG) const { - assert(getTargetMachine().getCodeModel() == CodeModel::Large); - assert(getTargetMachine().getRelocationModel() == Reloc::Static); +AArch64TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, + bool isVarArg, + const SmallVectorImpl<ISD::OutputArg> &Outs, + const SmallVectorImpl<SDValue> &OutVals, + SDLoc DL, SelectionDAG &DAG) const { + CCAssignFn *RetCC = CallConv == CallingConv::WebKit_JS + ? RetCC_AArch64_WebKit_JS + : RetCC_AArch64_AAPCS; + SmallVector<CCValAssign, 16> RVLocs; + CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), + getTargetMachine(), RVLocs, *DAG.getContext()); + CCInfo.AnalyzeReturn(Outs, RetCC); - EVT PtrVT = getPointerTy(); - SDLoc dl(Op); - const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op); - const GlobalValue *GV = GN->getGlobal(); + // Copy the result values into the output registers. + SDValue Flag; + SmallVector<SDValue, 4> RetOps(1, Chain); + for (unsigned i = 0, realRVLocIdx = 0; i != RVLocs.size(); + ++i, ++realRVLocIdx) { + CCValAssign &VA = RVLocs[i]; + assert(VA.isRegLoc() && "Can only return in registers!"); + SDValue Arg = OutVals[realRVLocIdx]; + + switch (VA.getLocInfo()) { + default: + llvm_unreachable("Unknown loc info!"); + case CCValAssign::Full: + if (Outs[i].ArgVT == MVT::i1) { + // AAPCS requires i1 to be zero-extended to i8 by the producer of the + // value. This is strictly redundant on Darwin (which uses "zeroext + // i1"), but will be optimised out before ISel. 
+        Arg = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Arg);
+        Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
+      }
+      break;
+    case CCValAssign::BCvt:
+      Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
+      break;
+    }
+
+    Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag);
+    Flag = Chain.getValue(1);
+    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
+  }
-  SDValue GlobalAddr = DAG.getNode(
-      AArch64ISD::WrapperLarge, dl, PtrVT,
-      DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G3),
-      DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G2_NC),
-      DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G1_NC),
-      DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G0_NC));
+  RetOps[0] = Chain; // Update chain.
-  if (GN->getOffset() != 0)
-    return DAG.getNode(ISD::ADD, dl, PtrVT, GlobalAddr,
-                       DAG.getConstant(GN->getOffset(), PtrVT));
+  // Add the flag if we have it.
+  if (Flag.getNode())
+    RetOps.push_back(Flag);
-  return GlobalAddr;
+  return DAG.getNode(AArch64ISD::RET_FLAG, DL, MVT::Other, RetOps);
 }
-SDValue
-AArch64TargetLowering::LowerGlobalAddressELFSmall(SDValue Op,
-                                                  SelectionDAG &DAG) const {
-  assert(getTargetMachine().getCodeModel() == CodeModel::Small);
+//===----------------------------------------------------------------------===//
+//  Other Lowering Code
+//===----------------------------------------------------------------------===//
+SDValue AArch64TargetLowering::LowerGlobalAddress(SDValue Op,
+                                                  SelectionDAG &DAG) const {
   EVT PtrVT = getPointerTy();
-  SDLoc dl(Op);
-  const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
-  const GlobalValue *GV = GN->getGlobal();
-  unsigned Alignment = GV->getAlignment();
-  Reloc::Model RelocM = getTargetMachine().getRelocationModel();
-  if (GV->isWeakForLinker() && GV->isDeclaration() && RelocM == Reloc::Static) {
-    // Weak undefined symbols can't use ADRP/ADD pair since they should evaluate
-    // to zero when they remain undefined. In PIC mode the GOT can take care of
-    // this, but in absolute mode we use a constant pool load.
-    SDValue PoolAddr;
-    PoolAddr = DAG.getNode(AArch64ISD::WrapperSmall, dl, PtrVT,
-                           DAG.getTargetConstantPool(GV, PtrVT, 0, 0,
-                                                     AArch64II::MO_NO_FLAG),
-                           DAG.getTargetConstantPool(GV, PtrVT, 0, 0,
-                                                     AArch64II::MO_LO12),
-                           DAG.getConstant(8, MVT::i32));
-    SDValue GlobalAddr = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), PoolAddr,
-                                     MachinePointerInfo::getConstantPool(),
-                                     /*isVolatile=*/ false,
-                                     /*isNonTemporal=*/ true,
-                                     /*isInvariant=*/ true, 8);
-    if (GN->getOffset() != 0)
-      return DAG.getNode(ISD::ADD, dl, PtrVT, GlobalAddr,
-                         DAG.getConstant(GN->getOffset(), PtrVT));
-
-    return GlobalAddr;
-  }
-
-  if (Alignment == 0) {
-    const PointerType *GVPtrTy = cast<PointerType>(GV->getType());
-    if (GVPtrTy->getElementType()->isSized()) {
-      Alignment
-        = getDataLayout()->getABITypeAlignment(GVPtrTy->getElementType());
-    } else {
-      // Be conservative if we can't guess, not that it really matters:
-      // functions and labels aren't valid for loads, and the methods used to
-      // actually calculate an address work with any alignment.
-      Alignment = 1;
-    }
+  SDLoc DL(Op);
+  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
+  unsigned char OpFlags =
+      Subtarget->ClassifyGlobalReference(GV, getTargetMachine());
+
+  assert(cast<GlobalAddressSDNode>(Op)->getOffset() == 0 &&
+         "unexpected offset in global node");
+
+  // This also catches the large code model case for Darwin.
+ if ((OpFlags & AArch64II::MO_GOT) != 0) { + SDValue GotAddr = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags); + // FIXME: Once remat is capable of dealing with instructions with register + // operands, expand this into two nodes instead of using a wrapper node. + return DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, GotAddr); } - unsigned char HiFixup, LoFixup; - bool UseGOT = getSubtarget()->GVIsIndirectSymbol(GV, RelocM); - - if (UseGOT) { - HiFixup = AArch64II::MO_GOT; - LoFixup = AArch64II::MO_GOT_LO12; - Alignment = 8; + if (getTargetMachine().getCodeModel() == CodeModel::Large) { + const unsigned char MO_NC = AArch64II::MO_NC; + return DAG.getNode( + AArch64ISD::WrapperLarge, DL, PtrVT, + DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_G3), + DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_G2 | MO_NC), + DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_G1 | MO_NC), + DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_G0 | MO_NC)); } else { - HiFixup = AArch64II::MO_NO_FLAG; - LoFixup = AArch64II::MO_LO12; + // Use ADRP/ADD or ADRP/LDR for everything else: the small model on ELF and + // the only correct model on Darwin. + SDValue Hi = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, + OpFlags | AArch64II::MO_PAGE); + unsigned char LoFlags = OpFlags | AArch64II::MO_PAGEOFF | AArch64II::MO_NC; + SDValue Lo = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, LoFlags); + + SDValue ADRP = DAG.getNode(AArch64ISD::ADRP, DL, PtrVT, Hi); + return DAG.getNode(AArch64ISD::ADDlow, DL, PtrVT, ADRP, Lo); } +} - // AArch64's small model demands the following sequence: - // ADRP x0, somewhere - // ADD x0, x0, #:lo12:somewhere ; (or LDR directly). - SDValue GlobalRef = DAG.getNode(AArch64ISD::WrapperSmall, dl, PtrVT, - DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, - HiFixup), - DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, - LoFixup), - DAG.getConstant(Alignment, MVT::i32)); +/// \brief Convert a TLS address reference into the correct sequence of loads +/// and calls to compute the variable's address (for Darwin, currently) and +/// return an SDValue containing the final node. - if (UseGOT) { - GlobalRef = DAG.getNode(AArch64ISD::GOTLoad, dl, PtrVT, DAG.getEntryNode(), - GlobalRef); - } +/// Darwin only has one TLS scheme which must be capable of dealing with the +/// fully general situation, in the worst case. This means: +/// + "extern __thread" declaration. +/// + Defined in a possibly unknown dynamic library. +/// +/// The general system is that each __thread variable has a [3 x i64] descriptor +/// which contains information used by the runtime to calculate the address. The +/// only part of this the compiler needs to know about is the first xword, which +/// contains a function pointer that must be called with the address of the +/// entire descriptor in "x0". +/// +/// Since this descriptor may be in a different unit, in general even the +/// descriptor must be accessed via an indirect load. The "ideal" code sequence +/// is: +/// adrp x0, _var@TLVPPAGE +/// ldr x0, [x0, _var@TLVPPAGEOFF] ; x0 now contains address of descriptor +/// ldr x1, [x0] ; x1 contains 1st entry of descriptor, +/// ; the function pointer +/// blr x1 ; Uses descriptor address in x0 +/// ; Address of _var is now in x0. +/// +/// If the address of _var's descriptor *is* known to the linker, then it can +/// change the first "ldr" instruction to an appropriate "add x0, x0, #imm" for +/// a slight efficiency gain. 
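To make the descriptor shape described above concrete, the following is a rough C++ sketch of the three-xword layout; the type and field names are invented purely for illustration, and only the first entry is something the compiler relies on:

#include <cstdint>

// Hypothetical view of one [3 x i64] TLV descriptor (illustration only).
struct DarwinTLVDescriptor {
  void *(*Resolver)(DarwinTLVDescriptor *Self); // first xword: called with the
                                                // descriptor's address in x0
  uint64_t Private[2];                          // remaining xwords belong to
                                                // the runtime
};

// What the adrp/ldr/blr sequence above effectively performs.
void *AddressOfTLSVariable(DarwinTLVDescriptor *Desc) {
  return Desc->Resolver(Desc);
}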
+SDValue +AArch64TargetLowering::LowerDarwinGlobalTLSAddress(SDValue Op, + SelectionDAG &DAG) const { + assert(Subtarget->isTargetDarwin() && "TLS only supported on Darwin"); - if (GN->getOffset() != 0) - return DAG.getNode(ISD::ADD, dl, PtrVT, GlobalRef, - DAG.getConstant(GN->getOffset(), PtrVT)); + SDLoc DL(Op); + MVT PtrVT = getPointerTy(); + const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); - return GlobalRef; -} + SDValue TLVPAddr = + DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_TLS); + SDValue DescAddr = DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, TLVPAddr); -SDValue -AArch64TargetLowering::LowerGlobalAddressELF(SDValue Op, - SelectionDAG &DAG) const { - // TableGen doesn't have easy access to the CodeModel or RelocationModel, so - // we make those distinctions here. - - switch (getTargetMachine().getCodeModel()) { - case CodeModel::Small: - return LowerGlobalAddressELFSmall(Op, DAG); - case CodeModel::Large: - return LowerGlobalAddressELFLarge(Op, DAG); - default: - llvm_unreachable("Only small and large code models supported now"); - } -} + // The first entry in the descriptor is a function pointer that we must call + // to obtain the address of the variable. + SDValue Chain = DAG.getEntryNode(); + SDValue FuncTLVGet = + DAG.getLoad(MVT::i64, DL, Chain, DescAddr, MachinePointerInfo::getGOT(), + false, true, true, 8); + Chain = FuncTLVGet.getValue(1); -SDValue -AArch64TargetLowering::LowerConstantPool(SDValue Op, - SelectionDAG &DAG) const { - SDLoc DL(Op); - EVT PtrVT = getPointerTy(); - ConstantPoolSDNode *CN = cast<ConstantPoolSDNode>(Op); - const Constant *C = CN->getConstVal(); - - switch(getTargetMachine().getCodeModel()) { - case CodeModel::Small: - // The most efficient code is PC-relative anyway for the small memory model, - // so we don't need to worry about relocation model. - return DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT, - DAG.getTargetConstantPool(C, PtrVT, 0, 0, - AArch64II::MO_NO_FLAG), - DAG.getTargetConstantPool(C, PtrVT, 0, 0, - AArch64II::MO_LO12), - DAG.getConstant(CN->getAlignment(), MVT::i32)); - case CodeModel::Large: - return DAG.getNode( - AArch64ISD::WrapperLarge, DL, PtrVT, - DAG.getTargetConstantPool(C, PtrVT, 0, 0, AArch64II::MO_ABS_G3), - DAG.getTargetConstantPool(C, PtrVT, 0, 0, AArch64II::MO_ABS_G2_NC), - DAG.getTargetConstantPool(C, PtrVT, 0, 0, AArch64II::MO_ABS_G1_NC), - DAG.getTargetConstantPool(C, PtrVT, 0, 0, AArch64II::MO_ABS_G0_NC)); - default: - llvm_unreachable("Only small and large code models supported now"); - } + MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); + MFI->setAdjustsStack(true); + + // TLS calls preserve all registers except those that absolutely must be + // trashed: X0 (it takes an argument), LR (it's a call) and NZCV (let's not be + // silly). + const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo(); + const AArch64RegisterInfo *ARI = + static_cast<const AArch64RegisterInfo *>(TRI); + const uint32_t *Mask = ARI->getTLSCallPreservedMask(); + + // Finally, we can make the call. This is just a degenerate version of a + // normal AArch64 call node: x0 takes the address of the descriptor, and + // returns the address of the variable in this thread. 
+ Chain = DAG.getCopyToReg(Chain, DL, AArch64::X0, DescAddr, SDValue()); + Chain = + DAG.getNode(AArch64ISD::CALL, DL, DAG.getVTList(MVT::Other, MVT::Glue), + Chain, FuncTLVGet, DAG.getRegister(AArch64::X0, MVT::i64), + DAG.getRegisterMask(Mask), Chain.getValue(1)); + return DAG.getCopyFromReg(Chain, DL, AArch64::X0, PtrVT, Chain.getValue(1)); } -SDValue AArch64TargetLowering::LowerTLSDescCall(SDValue SymAddr, - SDValue DescAddr, - SDLoc DL, - SelectionDAG &DAG) const { +/// When accessing thread-local variables under either the general-dynamic or +/// local-dynamic system, we make a "TLS-descriptor" call. The variable will +/// have a descriptor, accessible via a PC-relative ADRP, and whose first entry +/// is a function pointer to carry out the resolution. This function takes the +/// address of the descriptor in X0 and returns the TPIDR_EL0 offset in X0. All +/// other registers (except LR, NZCV) are preserved. +/// +/// Thus, the ideal call sequence on AArch64 is: +/// +/// adrp x0, :tlsdesc:thread_var +/// ldr x8, [x0, :tlsdesc_lo12:thread_var] +/// add x0, x0, :tlsdesc_lo12:thread_var +/// .tlsdesccall thread_var +/// blr x8 +/// (TPIDR_EL0 offset now in x0). +/// +/// The ".tlsdesccall" directive instructs the assembler to insert a particular +/// relocation to help the linker relax this sequence if it turns out to be too +/// conservative. +/// +/// FIXME: we currently produce an extra, duplicated, ADRP instruction, but this +/// is harmless. +SDValue AArch64TargetLowering::LowerELFTLSDescCall(SDValue SymAddr, + SDValue DescAddr, SDLoc DL, + SelectionDAG &DAG) const { EVT PtrVT = getPointerTy(); // The function we need to call is simply the first entry in the GOT for this // descriptor, load it in preparation. - SDValue Func, Chain; - Func = DAG.getNode(AArch64ISD::GOTLoad, DL, PtrVT, DAG.getEntryNode(), - DescAddr); + SDValue Func = DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, SymAddr); + + // TLS calls preserve all registers except those that absolutely must be + // trashed: X0 (it takes an argument), LR (it's a call) and NZCV (let's not be + // silly). + const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo(); + const AArch64RegisterInfo *ARI = + static_cast<const AArch64RegisterInfo *>(TRI); + const uint32_t *Mask = ARI->getTLSCallPreservedMask(); // The function takes only one argument: the address of the descriptor itself // in X0. - SDValue Glue; + SDValue Glue, Chain; Chain = DAG.getCopyToReg(DAG.getEntryNode(), DL, AArch64::X0, DescAddr, Glue); Glue = Chain.getValue(1); - // Finally, there's a special calling-convention which means that the lookup - // must preserve all registers (except X0, obviously). 
- const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo(); - const AArch64RegisterInfo *A64RI - = static_cast<const AArch64RegisterInfo *>(TRI); - const uint32_t *Mask = A64RI->getTLSDescCallPreservedMask(); - // We're now ready to populate the argument list, as with a normal call: - std::vector<SDValue> Ops; + SmallVector<SDValue, 6> Ops; Ops.push_back(Chain); Ops.push_back(Func); Ops.push_back(SymAddr); @@ -2586,22 +2705,18 @@ SDValue AArch64TargetLowering::LowerTLSDescCall(SDValue SymAddr, Ops.push_back(Glue); SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); - Chain = DAG.getNode(AArch64ISD::TLSDESCCALL, DL, NodeTys, &Ops[0], - Ops.size()); + Chain = DAG.getNode(AArch64ISD::TLSDESC_CALL, DL, NodeTys, Ops); Glue = Chain.getValue(1); - // After the call, the offset from TPIDR_EL0 is in X0, copy it out and pass it - // back to the generic handling code. return DAG.getCopyFromReg(Chain, DL, AArch64::X0, PtrVT, Glue); } SDValue -AArch64TargetLowering::LowerGlobalTLSAddress(SDValue Op, - SelectionDAG &DAG) const { - assert(getSubtarget()->isTargetELF() && - "TLS not implemented for non-ELF targets"); - assert(getTargetMachine().getCodeModel() == CodeModel::Small - && "TLS only supported in small memory model"); +AArch64TargetLowering::LowerELFGlobalTLSAddress(SDValue Op, + SelectionDAG &DAG) const { + assert(Subtarget->isTargetELF() && "This function expects an ELF target"); + assert(getTargetMachine().getCodeModel() == CodeModel::Small && + "ELF TLS only supported in small memory model"); const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); TLSModel::Model Model = getTargetMachine().getTLSModel(GA->getGlobal()); @@ -2613,39 +2728,22 @@ AArch64TargetLowering::LowerGlobalTLSAddress(SDValue Op, SDValue ThreadBase = DAG.getNode(AArch64ISD::THREAD_POINTER, DL, PtrVT); - if (Model == TLSModel::InitialExec) { - TPOff = DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT, - DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, - AArch64II::MO_GOTTPREL), - DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, - AArch64II::MO_GOTTPREL_LO12), - DAG.getConstant(8, MVT::i32)); - TPOff = DAG.getNode(AArch64ISD::GOTLoad, DL, PtrVT, DAG.getEntryNode(), - TPOff); - } else if (Model == TLSModel::LocalExec) { - SDValue HiVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0, - AArch64II::MO_TPREL_G1); - SDValue LoVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0, - AArch64II::MO_TPREL_G0_NC); - - TPOff = SDValue(DAG.getMachineNode(AArch64::MOVZxii, DL, PtrVT, HiVar, - DAG.getTargetConstant(1, MVT::i32)), 0); - TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKxii, DL, PtrVT, - TPOff, LoVar, - DAG.getTargetConstant(0, MVT::i32)), 0); - } else if (Model == TLSModel::GeneralDynamic) { - // Accesses used in this sequence go via the TLS descriptor which lives in - // the GOT. Prepare an address we can use to handle this. 
- SDValue HiDesc = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, - AArch64II::MO_TLSDESC); - SDValue LoDesc = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, - AArch64II::MO_TLSDESC_LO12); - SDValue DescAddr = DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT, - HiDesc, LoDesc, - DAG.getConstant(8, MVT::i32)); - SDValue SymAddr = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0); - - TPOff = LowerTLSDescCall(SymAddr, DescAddr, DL, DAG); + if (Model == TLSModel::LocalExec) { + SDValue HiVar = DAG.getTargetGlobalAddress( + GV, DL, PtrVT, 0, AArch64II::MO_TLS | AArch64II::MO_G1); + SDValue LoVar = DAG.getTargetGlobalAddress( + GV, DL, PtrVT, 0, + AArch64II::MO_TLS | AArch64II::MO_G0 | AArch64II::MO_NC); + + TPOff = SDValue(DAG.getMachineNode(AArch64::MOVZXi, DL, PtrVT, HiVar, + DAG.getTargetConstant(16, MVT::i32)), + 0); + TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKXi, DL, PtrVT, TPOff, LoVar, + DAG.getTargetConstant(0, MVT::i32)), + 0); + } else if (Model == TLSModel::InitialExec) { + TPOff = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_TLS); + TPOff = DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, TPOff); } else if (Model == TLSModel::LocalDynamic) { // Local-dynamic accesses proceed in two phases. A general-dynamic TLS // descriptor call against the special symbol _TLS_MODULE_BASE_ to calculate @@ -2653,367 +2751,354 @@ AArch64TargetLowering::LowerGlobalTLSAddress(SDValue Op, // calculation. // These accesses will need deduplicating if there's more than one. - AArch64MachineFunctionInfo* MFI = DAG.getMachineFunction() - .getInfo<AArch64MachineFunctionInfo>(); + AArch64FunctionInfo *MFI = + DAG.getMachineFunction().getInfo<AArch64FunctionInfo>(); MFI->incNumLocalDynamicTLSAccesses(); - - // Get the location of _TLS_MODULE_BASE_: - SDValue HiDesc = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT, - AArch64II::MO_TLSDESC); - SDValue LoDesc = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT, - AArch64II::MO_TLSDESC_LO12); - SDValue DescAddr = DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT, - HiDesc, LoDesc, - DAG.getConstant(8, MVT::i32)); - SDValue SymAddr = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT); - - ThreadBase = LowerTLSDescCall(SymAddr, DescAddr, DL, DAG); - - // Get the variable's offset from _TLS_MODULE_BASE_ - SDValue HiVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0, - AArch64II::MO_DTPREL_G1); - SDValue LoVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0, - AArch64II::MO_DTPREL_G0_NC); - - TPOff = SDValue(DAG.getMachineNode(AArch64::MOVZxii, DL, PtrVT, HiVar, - DAG.getTargetConstant(0, MVT::i32)), 0); - TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKxii, DL, PtrVT, - TPOff, LoVar, - DAG.getTargetConstant(0, MVT::i32)), 0); + // Accesses used in this sequence go via the TLS descriptor which lives in + // the GOT. Prepare an address we can use to handle this. + SDValue HiDesc = DAG.getTargetExternalSymbol( + "_TLS_MODULE_BASE_", PtrVT, AArch64II::MO_TLS | AArch64II::MO_PAGE); + SDValue LoDesc = DAG.getTargetExternalSymbol( + "_TLS_MODULE_BASE_", PtrVT, + AArch64II::MO_TLS | AArch64II::MO_PAGEOFF | AArch64II::MO_NC); + + // First argument to the descriptor call is the address of the descriptor + // itself. + SDValue DescAddr = DAG.getNode(AArch64ISD::ADRP, DL, PtrVT, HiDesc); + DescAddr = DAG.getNode(AArch64ISD::ADDlow, DL, PtrVT, DescAddr, LoDesc); + + // The call needs a relocation too for linker relaxation. It doesn't make + // sense to call it MO_PAGE or MO_PAGEOFF though so we need another copy of + // the address. 
+ SDValue SymAddr = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT, + AArch64II::MO_TLS); + + // Now we can calculate the offset from TPIDR_EL0 to this module's + // thread-local area. + TPOff = LowerELFTLSDescCall(SymAddr, DescAddr, DL, DAG); + + // Now use :dtprel_whatever: operations to calculate this variable's offset + // in its thread-storage area. + SDValue HiVar = DAG.getTargetGlobalAddress( + GV, DL, MVT::i64, 0, AArch64II::MO_TLS | AArch64II::MO_G1); + SDValue LoVar = DAG.getTargetGlobalAddress( + GV, DL, MVT::i64, 0, + AArch64II::MO_TLS | AArch64II::MO_G0 | AArch64II::MO_NC); + + SDValue DTPOff = + SDValue(DAG.getMachineNode(AArch64::MOVZXi, DL, PtrVT, HiVar, + DAG.getTargetConstant(16, MVT::i32)), + 0); + DTPOff = + SDValue(DAG.getMachineNode(AArch64::MOVKXi, DL, PtrVT, DTPOff, LoVar, + DAG.getTargetConstant(0, MVT::i32)), + 0); + + TPOff = DAG.getNode(ISD::ADD, DL, PtrVT, TPOff, DTPOff); + } else if (Model == TLSModel::GeneralDynamic) { + // Accesses used in this sequence go via the TLS descriptor which lives in + // the GOT. Prepare an address we can use to handle this. + SDValue HiDesc = DAG.getTargetGlobalAddress( + GV, DL, PtrVT, 0, AArch64II::MO_TLS | AArch64II::MO_PAGE); + SDValue LoDesc = DAG.getTargetGlobalAddress( + GV, DL, PtrVT, 0, + AArch64II::MO_TLS | AArch64II::MO_PAGEOFF | AArch64II::MO_NC); + + // First argument to the descriptor call is the address of the descriptor + // itself. + SDValue DescAddr = DAG.getNode(AArch64ISD::ADRP, DL, PtrVT, HiDesc); + DescAddr = DAG.getNode(AArch64ISD::ADDlow, DL, PtrVT, DescAddr, LoDesc); + + // The call needs a relocation too for linker relaxation. It doesn't make + // sense to call it MO_PAGE or MO_PAGEOFF though so we need another copy of + // the address. + SDValue SymAddr = + DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_TLS); + + // Finally we can make a call to calculate the offset from tpidr_el0. + TPOff = LowerELFTLSDescCall(SymAddr, DescAddr, DL, DAG); } else - llvm_unreachable("Unsupported TLS access model"); - + llvm_unreachable("Unsupported ELF TLS access model"); return DAG.getNode(ISD::ADD, DL, PtrVT, ThreadBase, TPOff); } -static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG, - bool IsSigned) { +SDValue AArch64TargetLowering::LowerGlobalTLSAddress(SDValue Op, + SelectionDAG &DAG) const { + if (Subtarget->isTargetDarwin()) + return LowerDarwinGlobalTLSAddress(Op, DAG); + else if (Subtarget->isTargetELF()) + return LowerELFGlobalTLSAddress(Op, DAG); + + llvm_unreachable("Unexpected platform trying to use TLS"); +} +SDValue AArch64TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const { + SDValue Chain = Op.getOperand(0); + ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); + SDValue LHS = Op.getOperand(2); + SDValue RHS = Op.getOperand(3); + SDValue Dest = Op.getOperand(4); SDLoc dl(Op); - EVT VT = Op.getValueType(); - SDValue Vec = Op.getOperand(0); - unsigned Opc = IsSigned ? ISD::SINT_TO_FP : ISD::UINT_TO_FP; - if (VT.getVectorNumElements() == 1) { - assert(VT == MVT::v1f64 && "Unexpected vector type!"); - if (VT.getSizeInBits() == Vec.getValueSizeInBits()) - return Op; - return DAG.UnrollVectorOp(Op.getNode()); - } + // Handle f128 first, since lowering it will result in comparing the return + // value of a libcall against zero, which is just what the rest of LowerBR_CC + // is expecting to deal with. 
+ if (LHS.getValueType() == MVT::f128) { + softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl); - if (VT.getSizeInBits() < Vec.getValueSizeInBits()) { - assert(Vec.getValueType() == MVT::v2i64 && VT == MVT::v2f32 && - "Unexpected vector type!"); - Vec = DAG.getNode(Opc, dl, MVT::v2f64, Vec); - return DAG.getNode(ISD::FP_ROUND, dl, VT, Vec, DAG.getIntPtrConstant(0)); - } else if (VT.getSizeInBits() > Vec.getValueSizeInBits()) { - unsigned CastOpc = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; - EVT CastVT = EVT::getIntegerVT(*DAG.getContext(), - VT.getVectorElementType().getSizeInBits()); - CastVT = - EVT::getVectorVT(*DAG.getContext(), CastVT, VT.getVectorNumElements()); - Vec = DAG.getNode(CastOpc, dl, CastVT, Vec); + // If softenSetCCOperands returned a scalar, we need to compare the result + // against zero to select between true and false values. + if (!RHS.getNode()) { + RHS = DAG.getConstant(0, LHS.getValueType()); + CC = ISD::SETNE; + } } - return DAG.getNode(Opc, dl, VT, Vec); -} + // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a branch + // instruction. + unsigned Opc = LHS.getOpcode(); + if (LHS.getResNo() == 1 && isa<ConstantSDNode>(RHS) && + cast<ConstantSDNode>(RHS)->isOne() && + (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO || + Opc == ISD::USUBO || Opc == ISD::SMULO || Opc == ISD::UMULO)) { + assert((CC == ISD::SETEQ || CC == ISD::SETNE) && + "Unexpected condition code."); + // Only lower legal XALUO ops. + if (!DAG.getTargetLoweringInfo().isTypeLegal(LHS->getValueType(0))) + return SDValue(); -SDValue -AArch64TargetLowering::LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG, - bool IsSigned) const { - if (Op.getValueType().isVector()) - return LowerVectorINT_TO_FP(Op, DAG, IsSigned); - if (Op.getValueType() != MVT::f128) { - // Legal for everything except f128. - return Op; - } + // The actual operation with overflow check. + AArch64CC::CondCode OFCC; + SDValue Value, Overflow; + std::tie(Value, Overflow) = getAArch64XALUOOp(OFCC, LHS.getValue(0), DAG); - RTLIB::Libcall LC; - if (IsSigned) - LC = RTLIB::getSINTTOFP(Op.getOperand(0).getValueType(), Op.getValueType()); - else - LC = RTLIB::getUINTTOFP(Op.getOperand(0).getValueType(), Op.getValueType()); + if (CC == ISD::SETNE) + OFCC = getInvertedCondCode(OFCC); + SDValue CCVal = DAG.getConstant(OFCC, MVT::i32); - return LowerF128ToCall(Op, DAG, LC); -} + return DAG.getNode(AArch64ISD::BRCOND, SDLoc(LHS), MVT::Other, Chain, Dest, + CCVal, Overflow); + } + if (LHS.getValueType().isInteger()) { + assert((LHS.getValueType() == RHS.getValueType()) && + (LHS.getValueType() == MVT::i32 || LHS.getValueType() == MVT::i64)); + + // If the RHS of the comparison is zero, we can potentially fold this + // to a specialized branch. + const ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS); + if (RHSC && RHSC->getZExtValue() == 0) { + if (CC == ISD::SETEQ) { + // See if we can use a TBZ to fold in an AND as well. + // TBZ has a smaller branch displacement than CBZ. If the offset is + // out of bounds, a late MI-layer pass rewrites branches. + // 403.gcc is an example that hits this case. + if (LHS.getOpcode() == ISD::AND && + isa<ConstantSDNode>(LHS.getOperand(1)) && + isPowerOf2_64(LHS.getConstantOperandVal(1))) { + SDValue Test = LHS.getOperand(0); + uint64_t Mask = LHS.getConstantOperandVal(1); + + // TBZ only operates on i64's, but the ext should be free. 
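As a rough source-level illustration of the fold described in the comment above (the function and callee names below are hypothetical), a branch on a single power-of-two mask can be selected to a test-bit branch instead of an AND followed by a compare-and-branch:

extern void taken_path(); // hypothetical callee, for illustration only

// With the AND folded, the bit test typically becomes a single
// "tbz/tbnz x0, #3, <label>" rather than an and + cbz/cbnz pair.
void branch_on_bit(long x) {
  if ((x & 8) == 0) // power-of-two mask compared against zero
    taken_path();
}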
+ if (Test.getValueType() == MVT::i32) + Test = DAG.getAnyExtOrTrunc(Test, dl, MVT::i64); + + return DAG.getNode(AArch64ISD::TBZ, dl, MVT::Other, Chain, Test, + DAG.getConstant(Log2_64(Mask), MVT::i64), Dest); + } -SDValue -AArch64TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const { - JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); - SDLoc dl(JT); - EVT PtrVT = getPointerTy(); + return DAG.getNode(AArch64ISD::CBZ, dl, MVT::Other, Chain, LHS, Dest); + } else if (CC == ISD::SETNE) { + // See if we can use a TBZ to fold in an AND as well. + // TBZ has a smaller branch displacement than CBZ. If the offset is + // out of bounds, a late MI-layer pass rewrites branches. + // 403.gcc is an example that hits this case. + if (LHS.getOpcode() == ISD::AND && + isa<ConstantSDNode>(LHS.getOperand(1)) && + isPowerOf2_64(LHS.getConstantOperandVal(1))) { + SDValue Test = LHS.getOperand(0); + uint64_t Mask = LHS.getConstantOperandVal(1); + + // TBNZ only operates on i64's, but the ext should be free. + if (Test.getValueType() == MVT::i32) + Test = DAG.getAnyExtOrTrunc(Test, dl, MVT::i64); + + return DAG.getNode(AArch64ISD::TBNZ, dl, MVT::Other, Chain, Test, + DAG.getConstant(Log2_64(Mask), MVT::i64), Dest); + } - // When compiling PIC, jump tables get put in the code section so a static - // relocation-style is acceptable for both cases. - switch (getTargetMachine().getCodeModel()) { - case CodeModel::Small: - return DAG.getNode(AArch64ISD::WrapperSmall, dl, PtrVT, - DAG.getTargetJumpTable(JT->getIndex(), PtrVT), - DAG.getTargetJumpTable(JT->getIndex(), PtrVT, - AArch64II::MO_LO12), - DAG.getConstant(1, MVT::i32)); - case CodeModel::Large: - return DAG.getNode( - AArch64ISD::WrapperLarge, dl, PtrVT, - DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G3), - DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G2_NC), - DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G1_NC), - DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G0_NC)); - default: - llvm_unreachable("Only small and large code models supported now"); - } -} + return DAG.getNode(AArch64ISD::CBNZ, dl, MVT::Other, Chain, LHS, Dest); + } + } -// (SELECT testbit, iftrue, iffalse) -SDValue -AArch64TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { - SDLoc dl(Op); - SDValue TheBit = Op.getOperand(0); - SDValue IfTrue = Op.getOperand(1); - SDValue IfFalse = Op.getOperand(2); + SDValue CCVal; + SDValue Cmp = getAArch64Cmp(LHS, RHS, CC, CCVal, DAG, dl); + return DAG.getNode(AArch64ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal, + Cmp); + } - // AArch64 BooleanContents is the default UndefinedBooleanContent, which means - // that as the consumer we are responsible for ignoring rubbish in higher - // bits. - TheBit = DAG.getNode(ISD::AND, dl, MVT::i32, TheBit, - DAG.getConstant(1, MVT::i32)); - SDValue A64CMP = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, TheBit, - DAG.getConstant(0, TheBit.getValueType()), - DAG.getCondCode(ISD::SETNE)); + assert(LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64); + + // Unfortunately, the mapping of LLVM FP CC's onto AArch64 CC's isn't totally + // clean. Some of them require two branches to implement. 
+ SDValue Cmp = emitComparison(LHS, RHS, CC, dl, DAG); + AArch64CC::CondCode CC1, CC2; + changeFPCCToAArch64CC(CC, CC1, CC2); + SDValue CC1Val = DAG.getConstant(CC1, MVT::i32); + SDValue BR1 = + DAG.getNode(AArch64ISD::BRCOND, dl, MVT::Other, Chain, Dest, CC1Val, Cmp); + if (CC2 != AArch64CC::AL) { + SDValue CC2Val = DAG.getConstant(CC2, MVT::i32); + return DAG.getNode(AArch64ISD::BRCOND, dl, MVT::Other, BR1, Dest, CC2Val, + Cmp); + } - return DAG.getNode(AArch64ISD::SELECT_CC, dl, Op.getValueType(), - A64CMP, IfTrue, IfFalse, - DAG.getConstant(A64CC::NE, MVT::i32)); + return BR1; } -static SDValue LowerVectorSETCC(SDValue Op, SelectionDAG &DAG) { - SDLoc DL(Op); - SDValue LHS = Op.getOperand(0); - SDValue RHS = Op.getOperand(1); - ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); +SDValue AArch64TargetLowering::LowerFCOPYSIGN(SDValue Op, + SelectionDAG &DAG) const { EVT VT = Op.getValueType(); - bool Invert = false; - SDValue Op0, Op1; - unsigned Opcode; + SDLoc DL(Op); - if (LHS.getValueType().isInteger()) { + SDValue In1 = Op.getOperand(0); + SDValue In2 = Op.getOperand(1); + EVT SrcVT = In2.getValueType(); + if (SrcVT != VT) { + if (SrcVT == MVT::f32 && VT == MVT::f64) + In2 = DAG.getNode(ISD::FP_EXTEND, DL, VT, In2); + else if (SrcVT == MVT::f64 && VT == MVT::f32) + In2 = DAG.getNode(ISD::FP_ROUND, DL, VT, In2, DAG.getIntPtrConstant(0)); + else + // FIXME: Src type is different, bail out for now. Can VT really be a + // vector type? + return SDValue(); + } - // Attempt to use Vector Integer Compare Mask Test instruction. - // TST = icmp ne (and (op0, op1), zero). - if (CC == ISD::SETNE) { - if (((LHS.getOpcode() == ISD::AND) && - ISD::isBuildVectorAllZeros(RHS.getNode())) || - ((RHS.getOpcode() == ISD::AND) && - ISD::isBuildVectorAllZeros(LHS.getNode()))) { - - SDValue AndOp = (LHS.getOpcode() == ISD::AND) ? LHS : RHS; - SDValue NewLHS = DAG.getNode(ISD::BITCAST, DL, VT, AndOp.getOperand(0)); - SDValue NewRHS = DAG.getNode(ISD::BITCAST, DL, VT, AndOp.getOperand(1)); - return DAG.getNode(AArch64ISD::NEON_TST, DL, VT, NewLHS, NewRHS); - } + EVT VecVT; + EVT EltVT; + SDValue EltMask, VecVal1, VecVal2; + if (VT == MVT::f32 || VT == MVT::v2f32 || VT == MVT::v4f32) { + EltVT = MVT::i32; + VecVT = MVT::v4i32; + EltMask = DAG.getConstant(0x80000000ULL, EltVT); + + if (!VT.isVector()) { + VecVal1 = DAG.getTargetInsertSubreg(AArch64::ssub, DL, VecVT, + DAG.getUNDEF(VecVT), In1); + VecVal2 = DAG.getTargetInsertSubreg(AArch64::ssub, DL, VecVT, + DAG.getUNDEF(VecVT), In2); + } else { + VecVal1 = DAG.getNode(ISD::BITCAST, DL, VecVT, In1); + VecVal2 = DAG.getNode(ISD::BITCAST, DL, VecVT, In2); } - - // Attempt to use Vector Integer Compare Mask against Zero instr (Signed). - // Note: Compare against Zero does not support unsigned predicates. - if ((ISD::isBuildVectorAllZeros(RHS.getNode()) || - ISD::isBuildVectorAllZeros(LHS.getNode())) && - !isUnsignedIntSetCC(CC)) { - - // If LHS is the zero value, swap operands and CondCode. - if (ISD::isBuildVectorAllZeros(LHS.getNode())) { - CC = getSetCCSwappedOperands(CC); - Op0 = RHS; - } else - Op0 = LHS; - - // Ensure valid CondCode for Compare Mask against Zero instruction: - // EQ, GE, GT, LE, LT. - if (ISD::SETNE == CC) { - Invert = true; - CC = ISD::SETEQ; - } - - // Using constant type to differentiate integer and FP compares with zero. 
- Op1 = DAG.getConstant(0, MVT::i32);
- Opcode = AArch64ISD::NEON_CMPZ;
-
+ } else if (VT == MVT::f64 || VT == MVT::v2f64) {
+ EltVT = MVT::i64;
+ VecVT = MVT::v2i64;
+
+ // We want to materialize a mask with the high bit set, but the AdvSIMD
+ // immediate moves cannot materialize that in a single instruction for
+ // 64-bit elements. Instead, materialize zero and then negate it.
+ EltMask = DAG.getConstant(0, EltVT);
+
+ if (!VT.isVector()) {
+ VecVal1 = DAG.getTargetInsertSubreg(AArch64::dsub, DL, VecVT,
+ DAG.getUNDEF(VecVT), In1);
+ VecVal2 = DAG.getTargetInsertSubreg(AArch64::dsub, DL, VecVT,
+ DAG.getUNDEF(VecVT), In2);
} else {
- // Attempt to use Vector Integer Compare Mask instr (Signed/Unsigned).
- // Ensure valid CondCode for Compare Mask instr: EQ, GE, GT, UGE, UGT.
- bool Swap = false;
- switch (CC) {
- default:
- llvm_unreachable("Illegal integer comparison.");
- case ISD::SETEQ:
- case ISD::SETGT:
- case ISD::SETGE:
- case ISD::SETUGT:
- case ISD::SETUGE:
- break;
- case ISD::SETNE:
- Invert = true;
- CC = ISD::SETEQ;
- break;
- case ISD::SETULT:
- case ISD::SETULE:
- case ISD::SETLT:
- case ISD::SETLE:
- Swap = true;
- CC = getSetCCSwappedOperands(CC);
- }
-
- if (Swap)
- std::swap(LHS, RHS);
-
- Opcode = AArch64ISD::NEON_CMP;
- Op0 = LHS;
- Op1 = RHS;
+ VecVal1 = DAG.getNode(ISD::BITCAST, DL, VecVT, In1);
+ VecVal2 = DAG.getNode(ISD::BITCAST, DL, VecVT, In2);
}
+ } else {
+ llvm_unreachable("Invalid type for copysign!");
+ }
- // Generate Compare Mask instr or Compare Mask against Zero instr.
- SDValue NeonCmp =
- DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(CC));
+ std::vector<SDValue> BuildVectorOps;
+ for (unsigned i = 0; i < VecVT.getVectorNumElements(); ++i)
+ BuildVectorOps.push_back(EltMask);
- if (Invert)
- NeonCmp = DAG.getNOT(DL, NeonCmp, VT);
+ SDValue BuildVec = DAG.getNode(ISD::BUILD_VECTOR, DL, VecVT, BuildVectorOps);
- return NeonCmp;
+ // If we couldn't materialize the mask above, then the mask vector will be
+ // the zero vector, and we need to negate it here.
+ if (VT == MVT::f64 || VT == MVT::v2f64) {
+ BuildVec = DAG.getNode(ISD::BITCAST, DL, MVT::v2f64, BuildVec);
+ BuildVec = DAG.getNode(ISD::FNEG, DL, MVT::v2f64, BuildVec);
+ BuildVec = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, BuildVec);
}
- // Now handle Floating Point cases.
- // Attempt to use Vector Floating Point Compare Mask against Zero instruction.
- if (ISD::isBuildVectorAllZeros(RHS.getNode()) ||
- ISD::isBuildVectorAllZeros(LHS.getNode())) {
-
- // If LHS is the zero value, swap operands and CondCode.
- if (ISD::isBuildVectorAllZeros(LHS.getNode())) {
- CC = getSetCCSwappedOperands(CC);
- Op0 = RHS;
- } else
- Op0 = LHS;
+ SDValue Sel =
+ DAG.getNode(AArch64ISD::BIT, DL, VecVT, VecVal1, VecVal2, BuildVec);
- // Using constant type to differentiate integer and FP compares with zero.
- Op1 = DAG.getConstantFP(0, MVT::f32);
- Opcode = AArch64ISD::NEON_CMPZ;
- } else {
- // Attempt to use Vector Floating Point Compare Mask instruction.
- Op0 = LHS;
- Op1 = RHS;
- Opcode = AArch64ISD::NEON_CMP;
- }
+ if (VT == MVT::f32)
+ return DAG.getTargetExtractSubreg(AArch64::ssub, DL, VT, Sel);
+ else if (VT == MVT::f64)
+ return DAG.getTargetExtractSubreg(AArch64::dsub, DL, VT, Sel);
+ else
+ return DAG.getNode(ISD::BITCAST, DL, VT, Sel);
+}
- SDValue NeonCmpAlt;
- // Some register compares have to be implemented with swapped CC and operands,
- // e.g.: OLT implemented as OGT with swapped operands.
- bool SwapIfRegArgs = false; +SDValue AArch64TargetLowering::LowerCTPOP(SDValue Op, SelectionDAG &DAG) const { + if (DAG.getMachineFunction().getFunction()->getAttributes().hasAttribute( + AttributeSet::FunctionIndex, Attribute::NoImplicitFloat)) + return SDValue(); - // Ensure valid CondCode for FP Compare Mask against Zero instruction: - // EQ, GE, GT, LE, LT. - // And ensure valid CondCode for FP Compare Mask instruction: EQ, GE, GT. - switch (CC) { - default: - llvm_unreachable("Illegal FP comparison"); - case ISD::SETUNE: - case ISD::SETNE: - Invert = true; // Fallthrough - case ISD::SETOEQ: - case ISD::SETEQ: - CC = ISD::SETEQ; - break; - case ISD::SETOLT: - case ISD::SETLT: - CC = ISD::SETLT; - SwapIfRegArgs = true; - break; - case ISD::SETOGT: - case ISD::SETGT: - CC = ISD::SETGT; - break; - case ISD::SETOLE: - case ISD::SETLE: - CC = ISD::SETLE; - SwapIfRegArgs = true; - break; - case ISD::SETOGE: - case ISD::SETGE: - CC = ISD::SETGE; - break; - case ISD::SETUGE: - Invert = true; - CC = ISD::SETLT; - SwapIfRegArgs = true; - break; - case ISD::SETULE: - Invert = true; - CC = ISD::SETGT; - break; - case ISD::SETUGT: - Invert = true; - CC = ISD::SETLE; - SwapIfRegArgs = true; - break; - case ISD::SETULT: - Invert = true; - CC = ISD::SETGE; - break; - case ISD::SETUEQ: - Invert = true; // Fallthrough - case ISD::SETONE: - // Expand this to (OGT |OLT). - NeonCmpAlt = - DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(ISD::SETGT)); - CC = ISD::SETLT; - SwapIfRegArgs = true; - break; - case ISD::SETUO: - Invert = true; // Fallthrough - case ISD::SETO: - // Expand this to (OGE | OLT). - NeonCmpAlt = - DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(ISD::SETGE)); - CC = ISD::SETLT; - SwapIfRegArgs = true; - break; - } + // While there is no integer popcount instruction, it can + // be more efficiently lowered to the following sequence that uses + // AdvSIMD registers/instructions as long as the copies to/from + // the AdvSIMD registers are cheap. 
+ // FMOV D0, X0 // copy 64-bit int to vector, high bits zero'd + // CNT V0.8B, V0.8B // 8xbyte pop-counts + // ADDV B0, V0.8B // sum 8xbyte pop-counts + // UMOV X0, V0.B[0] // copy byte result back to integer reg + SDValue Val = Op.getOperand(0); + SDLoc DL(Op); + EVT VT = Op.getValueType(); + SDValue ZeroVec = DAG.getUNDEF(MVT::v8i8); - if (Opcode == AArch64ISD::NEON_CMP && SwapIfRegArgs) { - CC = getSetCCSwappedOperands(CC); - std::swap(Op0, Op1); + SDValue VecVal; + if (VT == MVT::i32) { + VecVal = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Val); + VecVal = DAG.getTargetInsertSubreg(AArch64::ssub, DL, MVT::v8i8, ZeroVec, + VecVal); + } else { + VecVal = DAG.getNode(ISD::BITCAST, DL, MVT::v8i8, Val); } - // Generate FP Compare Mask instr or FP Compare Mask against Zero instr - SDValue NeonCmp = DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(CC)); + SDValue CtPop = DAG.getNode(ISD::CTPOP, DL, MVT::v8i8, VecVal); + SDValue UaddLV = DAG.getNode( + ISD::INTRINSIC_WO_CHAIN, DL, MVT::i32, + DAG.getConstant(Intrinsic::aarch64_neon_uaddlv, MVT::i32), CtPop); - if (NeonCmpAlt.getNode()) - NeonCmp = DAG.getNode(ISD::OR, DL, VT, NeonCmp, NeonCmpAlt); + if (VT == MVT::i64) + UaddLV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, UaddLV); + return UaddLV; +} - if (Invert) - NeonCmp = DAG.getNOT(DL, NeonCmp, VT); +SDValue AArch64TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { - return NeonCmp; -} + if (Op.getValueType().isVector()) + return LowerVSETCC(Op, DAG); -// (SETCC lhs, rhs, condcode) -SDValue -AArch64TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { - SDLoc dl(Op); SDValue LHS = Op.getOperand(0); SDValue RHS = Op.getOperand(1); ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); - EVT VT = Op.getValueType(); + SDLoc dl(Op); - if (VT.isVector()) - return LowerVectorSETCC(Op, DAG); + // We chose ZeroOrOneBooleanContents, so use zero and one. + EVT VT = Op.getValueType(); + SDValue TVal = DAG.getConstant(1, VT); + SDValue FVal = DAG.getConstant(0, VT); + // Handle f128 first, since one possible outcome is a normal integer + // comparison which gets picked up by the next if statement. if (LHS.getValueType() == MVT::f128) { - // f128 comparisons will be lowered to libcalls giving a valid LHS and RHS - // for the rest of the function (some i32 or i64 values). softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl); // If softenSetCCOperands returned a scalar, use it. - if (RHS.getNode() == 0) { + if (!RHS.getNode()) { assert(LHS.getValueType() == Op.getValueType() && "Unexpected setcc expansion!"); return LHS; @@ -3021,205 +3106,403 @@ AArch64TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { } if (LHS.getValueType().isInteger()) { - SDValue A64cc; + SDValue CCVal; + SDValue Cmp = + getAArch64Cmp(LHS, RHS, ISD::getSetCCInverse(CC, true), CCVal, DAG, dl); + + // Note that we inverted the condition above, so we reverse the order of + // the true and false operands here. This will allow the setcc to be + // matched to a single CSINC instruction. + return DAG.getNode(AArch64ISD::CSEL, dl, VT, FVal, TVal, CCVal, Cmp); + } + + // Now we know we're dealing with FP values. + assert(LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64); + + // If that fails, we'll need to perform an FCMP + CSEL sequence. Go ahead + // and do the comparison. 
+ SDValue Cmp = emitComparison(LHS, RHS, CC, dl, DAG); - // Integers are handled in a separate function because the combinations of - // immediates and tests can get hairy and we may want to fiddle things. - SDValue CmpOp = getSelectableIntSetCC(LHS, RHS, CC, A64cc, DAG, dl); + AArch64CC::CondCode CC1, CC2; + changeFPCCToAArch64CC(CC, CC1, CC2); + if (CC2 == AArch64CC::AL) { + changeFPCCToAArch64CC(ISD::getSetCCInverse(CC, false), CC1, CC2); + SDValue CC1Val = DAG.getConstant(CC1, MVT::i32); - return DAG.getNode(AArch64ISD::SELECT_CC, dl, VT, - CmpOp, DAG.getConstant(1, VT), DAG.getConstant(0, VT), - A64cc); + // Note that we inverted the condition above, so we reverse the order of + // the true and false operands here. This will allow the setcc to be + // matched to a single CSINC instruction. + return DAG.getNode(AArch64ISD::CSEL, dl, VT, FVal, TVal, CC1Val, Cmp); + } else { + // Unfortunately, the mapping of LLVM FP CC's onto AArch64 CC's isn't + // totally clean. Some of them require two CSELs to implement. As is in + // this case, we emit the first CSEL and then emit a second using the output + // of the first as the RHS. We're effectively OR'ing the two CC's together. + + // FIXME: It would be nice if we could match the two CSELs to two CSINCs. + SDValue CC1Val = DAG.getConstant(CC1, MVT::i32); + SDValue CS1 = + DAG.getNode(AArch64ISD::CSEL, dl, VT, TVal, FVal, CC1Val, Cmp); + + SDValue CC2Val = DAG.getConstant(CC2, MVT::i32); + return DAG.getNode(AArch64ISD::CSEL, dl, VT, TVal, CS1, CC2Val, Cmp); } +} - // Note that some LLVM floating-point CondCodes can't be lowered to a single - // conditional branch, hence FPCCToA64CC can set a second test, where either - // passing is sufficient. - A64CC::CondCodes CondCode, Alternative = A64CC::Invalid; - CondCode = FPCCToA64CC(CC, Alternative); - SDValue A64cc = DAG.getConstant(CondCode, MVT::i32); - SDValue CmpOp = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS, - DAG.getCondCode(CC)); - SDValue A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl, VT, - CmpOp, DAG.getConstant(1, VT), - DAG.getConstant(0, VT), A64cc); +/// A SELECT_CC operation is really some kind of max or min if both values being +/// compared are, in some sense, equal to the results in either case. However, +/// it is permissible to compare f32 values and produce directly extended f64 +/// values. +/// +/// Extending the comparison operands would also be allowed, but is less likely +/// to happen in practice since their use is right here. Note that truncate +/// operations would *not* be semantically equivalent. 
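A hypothetical source-level instance of what the comment above permits: the comparison is performed on the f32 operands while the selected results are the same values extended to f64, so the whole select is still a maximum.

// Sketch only; the compared operands reach the result through an fpext.
double widened_fmax(float a, float b) {
  return a > b ? static_cast<double>(a) : static_cast<double>(b);
}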
+static bool selectCCOpsAreFMaxCompatible(SDValue Cmp, SDValue Result) { + if (Cmp == Result) + return true; - if (Alternative != A64CC::Invalid) { - A64cc = DAG.getConstant(Alternative, MVT::i32); - A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl, VT, CmpOp, - DAG.getConstant(1, VT), A64SELECT_CC, A64cc); + ConstantFPSDNode *CCmp = dyn_cast<ConstantFPSDNode>(Cmp); + ConstantFPSDNode *CResult = dyn_cast<ConstantFPSDNode>(Result); + if (CCmp && CResult && Cmp.getValueType() == MVT::f32 && + Result.getValueType() == MVT::f64) { + bool Lossy; + APFloat CmpVal = CCmp->getValueAPF(); + CmpVal.convert(APFloat::IEEEdouble, APFloat::rmNearestTiesToEven, &Lossy); + return CResult->getValueAPF().bitwiseIsEqual(CmpVal); } - return A64SELECT_CC; + return Result->getOpcode() == ISD::FP_EXTEND && Result->getOperand(0) == Cmp; } -static SDValue LowerVectorSELECT_CC(SDValue Op, SelectionDAG &DAG) { - SDLoc dl(Op); - SDValue LHS = Op.getOperand(0); - SDValue RHS = Op.getOperand(1); - SDValue IfTrue = Op.getOperand(2); - SDValue IfFalse = Op.getOperand(3); - EVT IfTrueVT = IfTrue.getValueType(); - EVT CondVT = IfTrueVT.changeVectorElementTypeToInteger(); - ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); +SDValue AArch64TargetLowering::LowerSELECT(SDValue Op, + SelectionDAG &DAG) const { + SDValue CC = Op->getOperand(0); + SDValue TVal = Op->getOperand(1); + SDValue FVal = Op->getOperand(2); + SDLoc DL(Op); - // If LHS & RHS are floating point and IfTrue & IfFalse are vectors, we will - // use NEON compare. - if ((LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64)) { - EVT EltVT = LHS.getValueType(); - unsigned EltNum = 128 / EltVT.getSizeInBits(); - EVT VT = EVT::getVectorVT(*DAG.getContext(), EltVT, EltNum); - unsigned SubConstant = - (LHS.getValueType() == MVT::f32) ? AArch64::sub_32 :AArch64::sub_64; - EVT CEltT = (LHS.getValueType() == MVT::f32) ? MVT::i32 : MVT::i64; - EVT CVT = EVT::getVectorVT(*DAG.getContext(), CEltT, EltNum); - - LHS - = SDValue(DAG.getMachineNode(TargetOpcode::SUBREG_TO_REG, dl, - VT, DAG.getTargetConstant(0, MVT::i32), LHS, - DAG.getTargetConstant(SubConstant, MVT::i32)), 0); - RHS - = SDValue(DAG.getMachineNode(TargetOpcode::SUBREG_TO_REG, dl, - VT, DAG.getTargetConstant(0, MVT::i32), RHS, - DAG.getTargetConstant(SubConstant, MVT::i32)), 0); - - SDValue VSetCC = DAG.getSetCC(dl, CVT, LHS, RHS, CC); - SDValue ResCC = LowerVectorSETCC(VSetCC, DAG); - if (CEltT.getSizeInBits() < IfTrueVT.getSizeInBits()) { - EVT DUPVT = - EVT::getVectorVT(*DAG.getContext(), CEltT, - IfTrueVT.getSizeInBits() / CEltT.getSizeInBits()); - ResCC = DAG.getNode(AArch64ISD::NEON_VDUPLANE, dl, DUPVT, ResCC, - DAG.getConstant(0, MVT::i64, false)); - - ResCC = DAG.getNode(ISD::BITCAST, dl, CondVT, ResCC); - } else { - // FIXME: If IfTrue & IfFalse hold v1i8, v1i16 or v1i32, this function - // can't handle them and will hit this assert. - assert(CEltT.getSizeInBits() == IfTrueVT.getSizeInBits() && - "Vector of IfTrue & IfFalse is too small."); - - unsigned ExEltNum = - EltNum * IfTrueVT.getSizeInBits() / ResCC.getValueSizeInBits(); - EVT ExVT = EVT::getVectorVT(*DAG.getContext(), CEltT, ExEltNum); - ResCC = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ExVT, ResCC, - DAG.getConstant(0, MVT::i64, false)); - ResCC = DAG.getNode(ISD::BITCAST, dl, CondVT, ResCC); - } - SDValue VSelect = DAG.getNode(ISD::VSELECT, dl, IfTrue.getValueType(), - ResCC, IfTrue, IfFalse); - return VSelect; - } - - // Here we handle the case that LHS & RHS are integer and IfTrue & IfFalse are - // vectors. 
- A64CC::CondCodes CondCode, Alternative = A64CC::Invalid; - CondCode = FPCCToA64CC(CC, Alternative); - SDValue A64cc = DAG.getConstant(CondCode, MVT::i32); - SDValue SetCC = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS, - DAG.getCondCode(CC)); - EVT SEVT = MVT::i32; - if (IfTrue.getValueType().getVectorElementType().getSizeInBits() > 32) - SEVT = MVT::i64; - SDValue AllOne = DAG.getConstant(-1, SEVT); - SDValue AllZero = DAG.getConstant(0, SEVT); - SDValue A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl, SEVT, SetCC, - AllOne, AllZero, A64cc); - - if (Alternative != A64CC::Invalid) { - A64cc = DAG.getConstant(Alternative, MVT::i32); - A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl, Op.getValueType(), - SetCC, AllOne, A64SELECT_CC, A64cc); - } - SDValue VDup; - if (IfTrue.getValueType().getVectorNumElements() == 1) - VDup = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, CondVT, A64SELECT_CC); + unsigned Opc = CC.getOpcode(); + // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a select + // instruction. + if (CC.getResNo() == 1 && + (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO || + Opc == ISD::USUBO || Opc == ISD::SMULO || Opc == ISD::UMULO)) { + // Only lower legal XALUO ops. + if (!DAG.getTargetLoweringInfo().isTypeLegal(CC->getValueType(0))) + return SDValue(); + + AArch64CC::CondCode OFCC; + SDValue Value, Overflow; + std::tie(Value, Overflow) = getAArch64XALUOOp(OFCC, CC.getValue(0), DAG); + SDValue CCVal = DAG.getConstant(OFCC, MVT::i32); + + return DAG.getNode(AArch64ISD::CSEL, DL, Op.getValueType(), TVal, FVal, + CCVal, Overflow); + } + + if (CC.getOpcode() == ISD::SETCC) + return DAG.getSelectCC(DL, CC.getOperand(0), CC.getOperand(1), TVal, FVal, + cast<CondCodeSDNode>(CC.getOperand(2))->get()); else - VDup = DAG.getNode(AArch64ISD::NEON_VDUP, dl, CondVT, A64SELECT_CC); - SDValue VSelect = DAG.getNode(ISD::VSELECT, dl, IfTrue.getValueType(), - VDup, IfTrue, IfFalse); - return VSelect; + return DAG.getSelectCC(DL, CC, DAG.getConstant(0, CC.getValueType()), TVal, + FVal, ISD::SETNE); } -// (SELECT_CC lhs, rhs, iftrue, iffalse, condcode) -SDValue -AArch64TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { - SDLoc dl(Op); +SDValue AArch64TargetLowering::LowerSELECT_CC(SDValue Op, + SelectionDAG &DAG) const { + ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); SDValue LHS = Op.getOperand(0); SDValue RHS = Op.getOperand(1); - SDValue IfTrue = Op.getOperand(2); - SDValue IfFalse = Op.getOperand(3); - ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); - - if (IfTrue.getValueType().isVector()) - return LowerVectorSELECT_CC(Op, DAG); + SDValue TVal = Op.getOperand(2); + SDValue FVal = Op.getOperand(3); + SDLoc dl(Op); + // Handle f128 first, because it will result in a comparison of some RTLIB + // call result against zero. if (LHS.getValueType() == MVT::f128) { - // f128 comparisons are lowered to libcalls, but slot in nicely here - // afterwards. softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl); // If softenSetCCOperands returned a scalar, we need to compare the result // against zero to select between true and false values. - if (RHS.getNode() == 0) { + if (!RHS.getNode()) { RHS = DAG.getConstant(0, LHS.getValueType()); CC = ISD::SETNE; } } + // Handle integers first. 
if (LHS.getValueType().isInteger()) {
- SDValue A64cc;
+ assert((LHS.getValueType() == RHS.getValueType()) &&
+ (LHS.getValueType() == MVT::i32 || LHS.getValueType() == MVT::i64));
+
+ unsigned Opcode = AArch64ISD::CSEL;
+
+ // If both the TVal and the FVal are constants, see if we can swap them in
+ // order to form a CSINV or CSINC out of them.
+ ConstantSDNode *CFVal = dyn_cast<ConstantSDNode>(FVal);
+ ConstantSDNode *CTVal = dyn_cast<ConstantSDNode>(TVal);
+
+ if (CTVal && CFVal && CTVal->isAllOnesValue() && CFVal->isNullValue()) {
+ std::swap(TVal, FVal);
+ std::swap(CTVal, CFVal);
+ CC = ISD::getSetCCInverse(CC, true);
+ } else if (CTVal && CFVal && CTVal->isOne() && CFVal->isNullValue()) {
+ std::swap(TVal, FVal);
+ std::swap(CTVal, CFVal);
+ CC = ISD::getSetCCInverse(CC, true);
+ } else if (TVal.getOpcode() == ISD::XOR) {
+ // If TVal is a NOT we want to swap TVal and FVal so that we can match
+ // with a CSINV rather than a CSEL.
+ ConstantSDNode *CVal = dyn_cast<ConstantSDNode>(TVal.getOperand(1));
+
+ if (CVal && CVal->isAllOnesValue()) {
+ std::swap(TVal, FVal);
+ std::swap(CTVal, CFVal);
+ CC = ISD::getSetCCInverse(CC, true);
+ }
+ } else if (TVal.getOpcode() == ISD::SUB) {
+ // If TVal is a negation (SUB from 0) we want to swap TVal and FVal so
+ // that we can match with a CSNEG rather than a CSEL.
+ ConstantSDNode *CVal = dyn_cast<ConstantSDNode>(TVal.getOperand(0));
+
+ if (CVal && CVal->isNullValue()) {
+ std::swap(TVal, FVal);
+ std::swap(CTVal, CFVal);
+ CC = ISD::getSetCCInverse(CC, true);
+ }
+ } else if (CTVal && CFVal) {
+ const int64_t TrueVal = CTVal->getSExtValue();
+ const int64_t FalseVal = CFVal->getSExtValue();
+ bool Swap = false;
- // Integers are handled in a separate function because the combinations of
- // immediates and tests can get hairy and we may want to fiddle things.
- SDValue CmpOp = getSelectableIntSetCC(LHS, RHS, CC, A64cc, DAG, dl);
+ // If both TVal and FVal are constants, see if FVal is the
+ // inverse/negation/increment of TVal and generate a CSINV/CSNEG/CSINC
+ // instead of a CSEL in that case.
+ if (TrueVal == ~FalseVal) {
+ Opcode = AArch64ISD::CSINV;
+ } else if (TrueVal == -FalseVal) {
+ Opcode = AArch64ISD::CSNEG;
+ } else if (TVal.getValueType() == MVT::i32) {
+ // If our operands are only 32-bit wide, make sure we use 32-bit
+ // arithmetic for the check whether we can use CSINC. This ensures that
+ // the addition in the check will wrap around properly in case there is
+ // an overflow (which would not be the case if we do the check with
+ // 64-bit arithmetic).
+ const uint32_t TrueVal32 = CTVal->getZExtValue();
+ const uint32_t FalseVal32 = CFVal->getZExtValue();
+
+ if ((TrueVal32 == FalseVal32 + 1) || (TrueVal32 + 1 == FalseVal32)) {
+ Opcode = AArch64ISD::CSINC;
+
+ if (TrueVal32 > FalseVal32) {
+ Swap = true;
+ }
+ }
+ // 64-bit check whether we can use CSINC.
+ } else if ((TrueVal == FalseVal + 1) || (TrueVal + 1 == FalseVal)) {
+ Opcode = AArch64ISD::CSINC;
+
+ if (TrueVal > FalseVal) {
+ Swap = true;
+ }
+ }
+
+ // Swap TVal and FVal if necessary.
+ if (Swap) {
+ std::swap(TVal, FVal);
+ std::swap(CTVal, CFVal);
+ CC = ISD::getSetCCInverse(CC, true);
+ }
+
+ if (Opcode != AArch64ISD::CSEL) {
+ // Drop FVal since we can get its value by simply inverting/negating
+ // TVal.
+ FVal = TVal; + } + } + + SDValue CCVal; + SDValue Cmp = getAArch64Cmp(LHS, RHS, CC, CCVal, DAG, dl); - return DAG.getNode(AArch64ISD::SELECT_CC, dl, Op.getValueType(), CmpOp, - IfTrue, IfFalse, A64cc); + EVT VT = Op.getValueType(); + return DAG.getNode(Opcode, dl, VT, TVal, FVal, CCVal, Cmp); } - // Note that some LLVM floating-point CondCodes can't be lowered to a single - // conditional branch, hence FPCCToA64CC can set a second test, where either - // passing is sufficient. - A64CC::CondCodes CondCode, Alternative = A64CC::Invalid; - CondCode = FPCCToA64CC(CC, Alternative); - SDValue A64cc = DAG.getConstant(CondCode, MVT::i32); - SDValue SetCC = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS, - DAG.getCondCode(CC)); - SDValue A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl, - Op.getValueType(), - SetCC, IfTrue, IfFalse, A64cc); + // Now we know we're dealing with FP values. + assert(LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64); + assert(LHS.getValueType() == RHS.getValueType()); + EVT VT = Op.getValueType(); + + // Try to match this select into a max/min operation, which have dedicated + // opcode in the instruction set. + // FIXME: This is not correct in the presence of NaNs, so we only enable this + // in no-NaNs mode. + if (getTargetMachine().Options.NoNaNsFPMath) { + SDValue MinMaxLHS = TVal, MinMaxRHS = FVal; + if (selectCCOpsAreFMaxCompatible(LHS, MinMaxRHS) && + selectCCOpsAreFMaxCompatible(RHS, MinMaxLHS)) { + CC = ISD::getSetCCSwappedOperands(CC); + std::swap(MinMaxLHS, MinMaxRHS); + } - if (Alternative != A64CC::Invalid) { - A64cc = DAG.getConstant(Alternative, MVT::i32); - A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl, Op.getValueType(), - SetCC, IfTrue, A64SELECT_CC, A64cc); + if (selectCCOpsAreFMaxCompatible(LHS, MinMaxLHS) && + selectCCOpsAreFMaxCompatible(RHS, MinMaxRHS)) { + switch (CC) { + default: + break; + case ISD::SETGT: + case ISD::SETGE: + case ISD::SETUGT: + case ISD::SETUGE: + case ISD::SETOGT: + case ISD::SETOGE: + return DAG.getNode(AArch64ISD::FMAX, dl, VT, MinMaxLHS, MinMaxRHS); + break; + case ISD::SETLT: + case ISD::SETLE: + case ISD::SETULT: + case ISD::SETULE: + case ISD::SETOLT: + case ISD::SETOLE: + return DAG.getNode(AArch64ISD::FMIN, dl, VT, MinMaxLHS, MinMaxRHS); + break; + } + } + } + // If that fails, we'll need to perform an FCMP + CSEL sequence. Go ahead + // and do the comparison. + SDValue Cmp = emitComparison(LHS, RHS, CC, dl, DAG); + + // Unfortunately, the mapping of LLVM FP CC's onto AArch64 CC's isn't totally + // clean. Some of them require two CSELs to implement. + AArch64CC::CondCode CC1, CC2; + changeFPCCToAArch64CC(CC, CC1, CC2); + SDValue CC1Val = DAG.getConstant(CC1, MVT::i32); + SDValue CS1 = DAG.getNode(AArch64ISD::CSEL, dl, VT, TVal, FVal, CC1Val, Cmp); + + // If we need a second CSEL, emit it, using the output of the first as the + // RHS. We're effectively OR'ing the two CC's together. + if (CC2 != AArch64CC::AL) { + SDValue CC2Val = DAG.getConstant(CC2, MVT::i32); + return DAG.getNode(AArch64ISD::CSEL, dl, VT, TVal, CS1, CC2Val, Cmp); } - return A64SELECT_CC; + // Otherwise, return the output of the first CSEL. 
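As a worked instance of the 32-bit CSINC check in the integer path above (the values are chosen purely for illustration), incrementing INT32_MAX in 32-bit arithmetic wraps to INT32_MIN, which the sign-extended 64-bit comparison would not treat as adjacent:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t TrueVal32 = 0x80000000u;  // INT32_MIN as held in a W register
  const uint32_t FalseVal32 = 0x7fffffffu; // INT32_MAX
  assert(TrueVal32 == FalseVal32 + 1);     // 32-bit check: CSINC applies

  const int64_t TrueVal = INT32_MIN;       // the same pair, sign-extended
  const int64_t FalseVal = INT32_MAX;
  assert(TrueVal != FalseVal + 1);         // a 64-bit check would miss the pair
  return 0;
}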
+ return CS1; } -SDValue -AArch64TargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const { - const Value *DestSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue(); - const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); +SDValue AArch64TargetLowering::LowerJumpTable(SDValue Op, + SelectionDAG &DAG) const { + // Jump table entries as PC relative offsets. No additional tweaking + // is necessary here. Just get the address of the jump table. + JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); + EVT PtrVT = getPointerTy(); + SDLoc DL(Op); - // We have to make sure we copy the entire structure: 8+8+8+4+4 = 32 bytes - // rather than just 8. - return DAG.getMemcpy(Op.getOperand(0), SDLoc(Op), - Op.getOperand(1), Op.getOperand(2), - DAG.getConstant(32, MVT::i32), 8, false, false, - MachinePointerInfo(DestSV), MachinePointerInfo(SrcSV)); + if (getTargetMachine().getCodeModel() == CodeModel::Large && + !Subtarget->isTargetMachO()) { + const unsigned char MO_NC = AArch64II::MO_NC; + return DAG.getNode( + AArch64ISD::WrapperLarge, DL, PtrVT, + DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_G3), + DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_G2 | MO_NC), + DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_G1 | MO_NC), + DAG.getTargetJumpTable(JT->getIndex(), PtrVT, + AArch64II::MO_G0 | MO_NC)); + } + + SDValue Hi = + DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_PAGE); + SDValue Lo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, + AArch64II::MO_PAGEOFF | AArch64II::MO_NC); + SDValue ADRP = DAG.getNode(AArch64ISD::ADRP, DL, PtrVT, Hi); + return DAG.getNode(AArch64ISD::ADDlow, DL, PtrVT, ADRP, Lo); } -SDValue -AArch64TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const { +SDValue AArch64TargetLowering::LowerConstantPool(SDValue Op, + SelectionDAG &DAG) const { + ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); + EVT PtrVT = getPointerTy(); + SDLoc DL(Op); + + if (getTargetMachine().getCodeModel() == CodeModel::Large) { + // Use the GOT for the large code model on iOS. + if (Subtarget->isTargetMachO()) { + SDValue GotAddr = DAG.getTargetConstantPool( + CP->getConstVal(), PtrVT, CP->getAlignment(), CP->getOffset(), + AArch64II::MO_GOT); + return DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, GotAddr); + } + + const unsigned char MO_NC = AArch64II::MO_NC; + return DAG.getNode( + AArch64ISD::WrapperLarge, DL, PtrVT, + DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, CP->getAlignment(), + CP->getOffset(), AArch64II::MO_G3), + DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, CP->getAlignment(), + CP->getOffset(), AArch64II::MO_G2 | MO_NC), + DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, CP->getAlignment(), + CP->getOffset(), AArch64II::MO_G1 | MO_NC), + DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, CP->getAlignment(), + CP->getOffset(), AArch64II::MO_G0 | MO_NC)); + } else { + // Use ADRP/ADD or ADRP/LDR for everything else: the small memory model on + // ELF, the only valid one on Darwin. 
+ SDValue Hi = + DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, CP->getAlignment(), + CP->getOffset(), AArch64II::MO_PAGE); + SDValue Lo = DAG.getTargetConstantPool( + CP->getConstVal(), PtrVT, CP->getAlignment(), CP->getOffset(), + AArch64II::MO_PAGEOFF | AArch64II::MO_NC); + + SDValue ADRP = DAG.getNode(AArch64ISD::ADRP, DL, PtrVT, Hi); + return DAG.getNode(AArch64ISD::ADDlow, DL, PtrVT, ADRP, Lo); + } +} + +SDValue AArch64TargetLowering::LowerBlockAddress(SDValue Op, + SelectionDAG &DAG) const { + const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); + EVT PtrVT = getPointerTy(); + SDLoc DL(Op); + if (getTargetMachine().getCodeModel() == CodeModel::Large && + !Subtarget->isTargetMachO()) { + const unsigned char MO_NC = AArch64II::MO_NC; + return DAG.getNode( + AArch64ISD::WrapperLarge, DL, PtrVT, + DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_G3), + DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_G2 | MO_NC), + DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_G1 | MO_NC), + DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_G0 | MO_NC)); + } else { + SDValue Hi = DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_PAGE); + SDValue Lo = DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_PAGEOFF | + AArch64II::MO_NC); + SDValue ADRP = DAG.getNode(AArch64ISD::ADRP, DL, PtrVT, Hi); + return DAG.getNode(AArch64ISD::ADDlow, DL, PtrVT, ADRP, Lo); + } +} + +SDValue AArch64TargetLowering::LowerDarwin_VASTART(SDValue Op, + SelectionDAG &DAG) const { + AArch64FunctionInfo *FuncInfo = + DAG.getMachineFunction().getInfo<AArch64FunctionInfo>(); + + SDLoc DL(Op); + SDValue FR = + DAG.getFrameIndex(FuncInfo->getVarArgsStackIndex(), getPointerTy()); + const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); + return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1), + MachinePointerInfo(SV), false, false, 0); +} + +SDValue AArch64TargetLowering::LowerAAPCS_VASTART(SDValue Op, + SelectionDAG &DAG) const { // The layout of the va_list struct is specified in the AArch64 Procedure Call // Standard, section B.3. 
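
For reference, a sketch of the AAPCS64 va_list record that the code below initializes (field names and offsets follow section B.3 of the ABI document; the struct itself is not part of the patch):

    struct aapcs64_va_list {
      void *__stack;   // offset 0:  next stacked-argument address
      void *__gr_top;  // offset 8:  end of the general-register save area
      void *__vr_top;  // offset 16: end of the FP/SIMD-register save area
      int   __gr_offs; // offset 24: negative byte offset from __gr_top
      int   __vr_offs; // offset 28: negative byte offset from __vr_top
    };                 // 32 bytes total, the size LowerVACOPY copies below
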
MachineFunction &MF = DAG.getMachineFunction(); - AArch64MachineFunctionInfo *FuncInfo - = MF.getInfo<AArch64MachineFunctionInfo>(); + AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>(); SDLoc DL(Op); SDValue Chain = Op.getOperand(0); @@ -3228,498 +3511,2894 @@ AArch64TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const { SmallVector<SDValue, 4> MemOps; // void *__stack at offset 0 - SDValue Stack = DAG.getFrameIndex(FuncInfo->getVariadicStackIdx(), - getPointerTy()); + SDValue Stack = + DAG.getFrameIndex(FuncInfo->getVarArgsStackIndex(), getPointerTy()); MemOps.push_back(DAG.getStore(Chain, DL, Stack, VAList, - MachinePointerInfo(SV), false, false, 0)); + MachinePointerInfo(SV), false, false, 8)); // void *__gr_top at offset 8 - int GPRSize = FuncInfo->getVariadicGPRSize(); + int GPRSize = FuncInfo->getVarArgsGPRSize(); if (GPRSize > 0) { SDValue GRTop, GRTopAddr; GRTopAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList, DAG.getConstant(8, getPointerTy())); - GRTop = DAG.getFrameIndex(FuncInfo->getVariadicGPRIdx(), getPointerTy()); + GRTop = DAG.getFrameIndex(FuncInfo->getVarArgsGPRIndex(), getPointerTy()); GRTop = DAG.getNode(ISD::ADD, DL, getPointerTy(), GRTop, DAG.getConstant(GPRSize, getPointerTy())); MemOps.push_back(DAG.getStore(Chain, DL, GRTop, GRTopAddr, - MachinePointerInfo(SV, 8), - false, false, 0)); + MachinePointerInfo(SV, 8), false, false, 8)); } // void *__vr_top at offset 16 - int FPRSize = FuncInfo->getVariadicFPRSize(); + int FPRSize = FuncInfo->getVarArgsFPRSize(); if (FPRSize > 0) { SDValue VRTop, VRTopAddr; VRTopAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList, DAG.getConstant(16, getPointerTy())); - VRTop = DAG.getFrameIndex(FuncInfo->getVariadicFPRIdx(), getPointerTy()); + VRTop = DAG.getFrameIndex(FuncInfo->getVarArgsFPRIndex(), getPointerTy()); VRTop = DAG.getNode(ISD::ADD, DL, getPointerTy(), VRTop, DAG.getConstant(FPRSize, getPointerTy())); MemOps.push_back(DAG.getStore(Chain, DL, VRTop, VRTopAddr, - MachinePointerInfo(SV, 16), - false, false, 0)); + MachinePointerInfo(SV, 16), false, false, 8)); } // int __gr_offs at offset 24 SDValue GROffsAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList, DAG.getConstant(24, getPointerTy())); MemOps.push_back(DAG.getStore(Chain, DL, DAG.getConstant(-GPRSize, MVT::i32), - GROffsAddr, MachinePointerInfo(SV, 24), - false, false, 0)); + GROffsAddr, MachinePointerInfo(SV, 24), false, + false, 4)); // int __vr_offs at offset 28 SDValue VROffsAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList, DAG.getConstant(28, getPointerTy())); MemOps.push_back(DAG.getStore(Chain, DL, DAG.getConstant(-FPRSize, MVT::i32), - VROffsAddr, MachinePointerInfo(SV, 28), - false, false, 0)); + VROffsAddr, MachinePointerInfo(SV, 28), false, + false, 4)); - return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, &MemOps[0], - MemOps.size()); + return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps); } -SDValue -AArch64TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { - switch (Op.getOpcode()) { - default: llvm_unreachable("Don't know how to custom lower this!"); - case ISD::FADD: return LowerF128ToCall(Op, DAG, RTLIB::ADD_F128); - case ISD::FSUB: return LowerF128ToCall(Op, DAG, RTLIB::SUB_F128); - case ISD::FMUL: return LowerF128ToCall(Op, DAG, RTLIB::MUL_F128); - case ISD::FDIV: return LowerF128ToCall(Op, DAG, RTLIB::DIV_F128); - case ISD::FP_TO_SINT: return LowerFP_TO_INT(Op, DAG, true); - case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG, false); - case ISD::SINT_TO_FP: return 
LowerINT_TO_FP(Op, DAG, true); - case ISD::UINT_TO_FP: return LowerINT_TO_FP(Op, DAG, false); - case ISD::FP_ROUND: return LowerFP_ROUND(Op, DAG); - case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG); - case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); - case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); - - case ISD::SHL_PARTS: return LowerShiftLeftParts(Op, DAG); - case ISD::SRL_PARTS: - case ISD::SRA_PARTS: return LowerShiftRightParts(Op, DAG); - - case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); - case ISD::BRCOND: return LowerBRCOND(Op, DAG); - case ISD::BR_CC: return LowerBR_CC(Op, DAG); - case ISD::GlobalAddress: return LowerGlobalAddressELF(Op, DAG); - case ISD::ConstantPool: return LowerConstantPool(Op, DAG); - case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); - case ISD::JumpTable: return LowerJumpTable(Op, DAG); - case ISD::SELECT: return LowerSELECT(Op, DAG); - case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); - case ISD::SETCC: return LowerSETCC(Op, DAG); - case ISD::VACOPY: return LowerVACOPY(Op, DAG); - case ISD::VASTART: return LowerVASTART(Op, DAG); - case ISD::BUILD_VECTOR: - return LowerBUILD_VECTOR(Op, DAG, getSubtarget()); - case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG); - case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); +SDValue AArch64TargetLowering::LowerVASTART(SDValue Op, + SelectionDAG &DAG) const { + return Subtarget->isTargetDarwin() ? LowerDarwin_VASTART(Op, DAG) + : LowerAAPCS_VASTART(Op, DAG); +} + +SDValue AArch64TargetLowering::LowerVACOPY(SDValue Op, + SelectionDAG &DAG) const { + // AAPCS has three pointers and two ints (= 32 bytes), Darwin has single + // pointer. + unsigned VaListSize = Subtarget->isTargetDarwin() ? 8 : 32; + const Value *DestSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue(); + const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); + + return DAG.getMemcpy(Op.getOperand(0), SDLoc(Op), Op.getOperand(1), + Op.getOperand(2), DAG.getConstant(VaListSize, MVT::i32), + 8, false, false, MachinePointerInfo(DestSV), + MachinePointerInfo(SrcSV)); +} + +SDValue AArch64TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const { + assert(Subtarget->isTargetDarwin() && + "automatic va_arg instruction only works on Darwin"); + + const Value *V = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); + EVT VT = Op.getValueType(); + SDLoc DL(Op); + SDValue Chain = Op.getOperand(0); + SDValue Addr = Op.getOperand(1); + unsigned Align = Op.getConstantOperandVal(3); + + SDValue VAList = DAG.getLoad(getPointerTy(), DL, Chain, Addr, + MachinePointerInfo(V), false, false, false, 0); + Chain = VAList.getValue(1); + + if (Align > 8) { + assert(((Align & (Align - 1)) == 0) && "Expected Align to be a power of 2"); + VAList = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList, + DAG.getConstant(Align - 1, getPointerTy())); + VAList = DAG.getNode(ISD::AND, DL, getPointerTy(), VAList, + DAG.getConstant(-(int64_t)Align, getPointerTy())); } - return SDValue(); + Type *ArgTy = VT.getTypeForEVT(*DAG.getContext()); + uint64_t ArgSize = getDataLayout()->getTypeAllocSize(ArgTy); + + // Scalar integer and FP values smaller than 64 bits are implicitly extended + // up to 64 bits. At the very least, we have to increase the striding of the + // vaargs list to match this, and for FP values we need to introduce + // FP_ROUND nodes as well. 
+ if (VT.isInteger() && !VT.isVector()) + ArgSize = 8; + bool NeedFPTrunc = false; + if (VT.isFloatingPoint() && !VT.isVector() && VT != MVT::f64) { + ArgSize = 8; + NeedFPTrunc = true; + } + + // Increment the pointer, VAList, to the next vaarg + SDValue VANext = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList, + DAG.getConstant(ArgSize, getPointerTy())); + // Store the incremented VAList to the legalized pointer + SDValue APStore = DAG.getStore(Chain, DL, VANext, Addr, MachinePointerInfo(V), + false, false, 0); + + // Load the actual argument out of the pointer VAList + if (NeedFPTrunc) { + // Load the value as an f64. + SDValue WideFP = DAG.getLoad(MVT::f64, DL, APStore, VAList, + MachinePointerInfo(), false, false, false, 0); + // Round the value down to an f32. + SDValue NarrowFP = DAG.getNode(ISD::FP_ROUND, DL, VT, WideFP.getValue(0), + DAG.getIntPtrConstant(1)); + SDValue Ops[] = { NarrowFP, WideFP.getValue(1) }; + // Merge the rounded value with the chain output of the load. + return DAG.getMergeValues(Ops, DL); + } + + return DAG.getLoad(VT, DL, APStore, VAList, MachinePointerInfo(), false, + false, false, 0); } -/// Check if the specified splat value corresponds to a valid vector constant -/// for a Neon instruction with a "modified immediate" operand (e.g., MOVI). If -/// so, return the encoded 8-bit immediate and the OpCmode instruction fields -/// values. -static bool isNeonModifiedImm(uint64_t SplatBits, uint64_t SplatUndef, - unsigned SplatBitSize, SelectionDAG &DAG, - bool is128Bits, NeonModImmType type, EVT &VT, - unsigned &Imm, unsigned &OpCmode) { - switch (SplatBitSize) { +SDValue AArch64TargetLowering::LowerFRAMEADDR(SDValue Op, + SelectionDAG &DAG) const { + MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); + MFI->setFrameAddressIsTaken(true); + + EVT VT = Op.getValueType(); + SDLoc DL(Op); + unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); + SDValue FrameAddr = + DAG.getCopyFromReg(DAG.getEntryNode(), DL, AArch64::FP, VT); + while (Depth--) + FrameAddr = DAG.getLoad(VT, DL, DAG.getEntryNode(), FrameAddr, + MachinePointerInfo(), false, false, false, 0); + return FrameAddr; +} + +// FIXME? Maybe this could be a TableGen attribute on some registers and +// this table could be generated automatically from RegInfo. +unsigned AArch64TargetLowering::getRegisterByName(const char* RegName, + EVT VT) const { + unsigned Reg = StringSwitch<unsigned>(RegName) + .Case("sp", AArch64::SP) + .Default(0); + if (Reg) + return Reg; + report_fatal_error("Invalid register name global variable"); +} + +SDValue AArch64TargetLowering::LowerRETURNADDR(SDValue Op, + SelectionDAG &DAG) const { + MachineFunction &MF = DAG.getMachineFunction(); + MachineFrameInfo *MFI = MF.getFrameInfo(); + MFI->setReturnAddressIsTaken(true); + + EVT VT = Op.getValueType(); + SDLoc DL(Op); + unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); + if (Depth) { + SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); + SDValue Offset = DAG.getConstant(8, getPointerTy()); + return DAG.getLoad(VT, DL, DAG.getEntryNode(), + DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset), + MachinePointerInfo(), false, false, false, 0); + } + + // Return LR, which contains the return address. Mark it an implicit live-in. + unsigned Reg = MF.addLiveIn(AArch64::LR, &AArch64::GPR64RegClass); + return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, VT); +} + +/// LowerShiftRightParts - Lower SRA_PARTS, which returns two +/// i64 values and take a 2 x i64 value to shift plus a shift amount. 
+SDValue AArch64TargetLowering::LowerShiftRightParts(SDValue Op, + SelectionDAG &DAG) const { + assert(Op.getNumOperands() == 3 && "Not a double-shift!"); + EVT VT = Op.getValueType(); + unsigned VTBits = VT.getSizeInBits(); + SDLoc dl(Op); + SDValue ShOpLo = Op.getOperand(0); + SDValue ShOpHi = Op.getOperand(1); + SDValue ShAmt = Op.getOperand(2); + SDValue ARMcc; + unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL; + + assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS); + + SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i64, + DAG.getConstant(VTBits, MVT::i64), ShAmt); + SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt); + SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i64, ShAmt, + DAG.getConstant(VTBits, MVT::i64)); + SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt); + + SDValue Cmp = emitComparison(ExtraShAmt, DAG.getConstant(0, MVT::i64), + ISD::SETGE, dl, DAG); + SDValue CCVal = DAG.getConstant(AArch64CC::GE, MVT::i32); + + SDValue FalseValLo = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); + SDValue TrueValLo = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt); + SDValue Lo = + DAG.getNode(AArch64ISD::CSEL, dl, VT, TrueValLo, FalseValLo, CCVal, Cmp); + + // AArch64 shifts larger than the register width are wrapped rather than + // clamped, so we can't just emit "hi >> x". + SDValue FalseValHi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt); + SDValue TrueValHi = Opc == ISD::SRA + ? DAG.getNode(Opc, dl, VT, ShOpHi, + DAG.getConstant(VTBits - 1, MVT::i64)) + : DAG.getConstant(0, VT); + SDValue Hi = + DAG.getNode(AArch64ISD::CSEL, dl, VT, TrueValHi, FalseValHi, CCVal, Cmp); + + SDValue Ops[2] = { Lo, Hi }; + return DAG.getMergeValues(Ops, dl); +} + +/// LowerShiftLeftParts - Lower SHL_PARTS, which returns two +/// i64 values and take a 2 x i64 value to shift plus a shift amount. +SDValue AArch64TargetLowering::LowerShiftLeftParts(SDValue Op, + SelectionDAG &DAG) const { + assert(Op.getNumOperands() == 3 && "Not a double-shift!"); + EVT VT = Op.getValueType(); + unsigned VTBits = VT.getSizeInBits(); + SDLoc dl(Op); + SDValue ShOpLo = Op.getOperand(0); + SDValue ShOpHi = Op.getOperand(1); + SDValue ShAmt = Op.getOperand(2); + SDValue ARMcc; + + assert(Op.getOpcode() == ISD::SHL_PARTS); + SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i64, + DAG.getConstant(VTBits, MVT::i64), ShAmt); + SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt); + SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i64, ShAmt, + DAG.getConstant(VTBits, MVT::i64)); + SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt); + SDValue Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt); + + SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); + + SDValue Cmp = emitComparison(ExtraShAmt, DAG.getConstant(0, MVT::i64), + ISD::SETGE, dl, DAG); + SDValue CCVal = DAG.getConstant(AArch64CC::GE, MVT::i32); + SDValue Hi = + DAG.getNode(AArch64ISD::CSEL, dl, VT, Tmp3, FalseVal, CCVal, Cmp); + + // AArch64 shifts of larger than register sizes are wrapped rather than + // clamped, so we can't just emit "lo << a" if a is too big. 
+ SDValue TrueValLo = DAG.getConstant(0, VT); + SDValue FalseValLo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt); + SDValue Lo = + DAG.getNode(AArch64ISD::CSEL, dl, VT, TrueValLo, FalseValLo, CCVal, Cmp); + + SDValue Ops[2] = { Lo, Hi }; + return DAG.getMergeValues(Ops, dl); +} + +bool AArch64TargetLowering::isOffsetFoldingLegal( + const GlobalAddressSDNode *GA) const { + // The AArch64 target doesn't support folding offsets into global addresses. + return false; +} + +bool AArch64TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const { + // We can materialize #0.0 as fmov $Rd, XZR for 64-bit and 32-bit cases. + // FIXME: We should be able to handle f128 as well with a clever lowering. + if (Imm.isPosZero() && (VT == MVT::f64 || VT == MVT::f32)) + return true; + + if (VT == MVT::f64) + return AArch64_AM::getFP64Imm(Imm) != -1; + else if (VT == MVT::f32) + return AArch64_AM::getFP32Imm(Imm) != -1; + return false; +} + +//===----------------------------------------------------------------------===// +// AArch64 Optimization Hooks +//===----------------------------------------------------------------------===// + +//===----------------------------------------------------------------------===// +// AArch64 Inline Assembly Support +//===----------------------------------------------------------------------===// + +// Table of Constraints +// TODO: This is the current set of constraints supported by ARM for the +// compiler, not all of them may make sense, e.g. S may be difficult to support. +// +// r - A general register +// w - An FP/SIMD register of some size in the range v0-v31 +// x - An FP/SIMD register of some size in the range v0-v15 +// I - Constant that can be used with an ADD instruction +// J - Constant that can be used with a SUB instruction +// K - Constant that can be used with a 32-bit logical instruction +// L - Constant that can be used with a 64-bit logical instruction +// M - Constant that can be used as a 32-bit MOV immediate +// N - Constant that can be used as a 64-bit MOV immediate +// Q - A memory reference with base register and no offset +// S - A symbolic address +// Y - Floating point constant zero +// Z - Integer constant zero +// +// Note that general register operands will be output using their 64-bit x +// register name, whatever the size of the variable, unless the asm operand +// is prefixed by the %w modifier. Floating-point and SIMD register operands +// will be output with the v prefix unless prefixed by the %b, %h, %s, %d or +// %q modifier. + +/// getConstraintType - Given a constraint letter, return the type of +/// constraint it is for this target. +AArch64TargetLowering::ConstraintType +AArch64TargetLowering::getConstraintType(const std::string &Constraint) const { + if (Constraint.size() == 1) { + switch (Constraint[0]) { + default: + break; + case 'z': + return C_Other; + case 'x': + case 'w': + return C_RegisterClass; + // An address with a single base register. Due to the way we + // currently handle addresses it is the same as 'r'. + case 'Q': + return C_Memory; + } + } + return TargetLowering::getConstraintType(Constraint); +} + +/// Examine constraint type and operand type and determine a weight value. +/// This object must already have been set up with the operand type +/// and the current alternative constraint selected. 
+TargetLowering::ConstraintWeight +AArch64TargetLowering::getSingleConstraintMatchWeight( + AsmOperandInfo &info, const char *constraint) const { + ConstraintWeight weight = CW_Invalid; + Value *CallOperandVal = info.CallOperandVal; + // If we don't have a value, we can't do a match, + // but allow it at the lowest weight. + if (!CallOperandVal) + return CW_Default; + Type *type = CallOperandVal->getType(); + // Look at the constraint type. + switch (*constraint) { default: - llvm_unreachable("unexpected size for isNeonModifiedImm"); - case 8: { - if (type != Neon_Mov_Imm) - return false; - assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big"); - // Neon movi per byte: Op=0, Cmode=1110. - OpCmode = 0xe; - Imm = SplatBits; - VT = is128Bits ? MVT::v16i8 : MVT::v8i8; + weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); + break; + case 'x': + case 'w': + if (type->isFloatingPointTy() || type->isVectorTy()) + weight = CW_Register; + break; + case 'z': + weight = CW_Constant; break; } - case 16: { - // Neon move inst per halfword - VT = is128Bits ? MVT::v8i16 : MVT::v4i16; - if ((SplatBits & ~0xff) == 0) { - // Value = 0x00nn is 0x00nn LSL 0 - // movi: Op=0, Cmode=1000; mvni: Op=1, Cmode=1000 - // bic: Op=1, Cmode=1001; orr: Op=0, Cmode=1001 - // Op=x, Cmode=100y - Imm = SplatBits; - OpCmode = 0x8; + return weight; +} + +std::pair<unsigned, const TargetRegisterClass *> +AArch64TargetLowering::getRegForInlineAsmConstraint( + const std::string &Constraint, MVT VT) const { + if (Constraint.size() == 1) { + switch (Constraint[0]) { + case 'r': + if (VT.getSizeInBits() == 64) + return std::make_pair(0U, &AArch64::GPR64commonRegClass); + return std::make_pair(0U, &AArch64::GPR32commonRegClass); + case 'w': + if (VT == MVT::f32) + return std::make_pair(0U, &AArch64::FPR32RegClass); + if (VT.getSizeInBits() == 64) + return std::make_pair(0U, &AArch64::FPR64RegClass); + if (VT.getSizeInBits() == 128) + return std::make_pair(0U, &AArch64::FPR128RegClass); break; - } - if ((SplatBits & ~0xff00) == 0) { - // Value = 0xnn00 is 0x00nn LSL 8 - // movi: Op=0, Cmode=1010; mvni: Op=1, Cmode=1010 - // bic: Op=1, Cmode=1011; orr: Op=0, Cmode=1011 - // Op=x, Cmode=101x - Imm = SplatBits >> 8; - OpCmode = 0xa; + // The instructions that this constraint is designed for can + // only take 128-bit registers so just use that regclass. + case 'x': + if (VT.getSizeInBits() == 128) + return std::make_pair(0U, &AArch64::FPR128_loRegClass); break; } - // can't handle any other - return false; } + if (StringRef("{cc}").equals_lower(Constraint)) + return std::make_pair(unsigned(AArch64::NZCV), &AArch64::CCRRegClass); - case 32: { - // First the LSL variants (MSL is unusable by some interested instructions). + // Use the default implementation in TargetLowering to convert the register + // constraint into a member of a register class. + std::pair<unsigned, const TargetRegisterClass *> Res; + Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); + + // Not found as a standard register? + if (!Res.second) { + unsigned Size = Constraint.size(); + if ((Size == 4 || Size == 5) && Constraint[0] == '{' && + tolower(Constraint[1]) == 'v' && Constraint[Size - 1] == '}') { + const std::string Reg = + std::string(&Constraint[2], &Constraint[Size - 1]); + int RegNo = atoi(Reg.c_str()); + if (RegNo >= 0 && RegNo <= 31) { + // v0 - v31 are aliases of q0 - q31. + // By default we'll emit v0-v31 for this unless there's a modifier where + // we'll emit the correct register as well. 
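
A user-level illustration of the constraints these hooks resolve (hypothetical example, not from the patch): "w" asks for any FP/SIMD register and is mapped to an FPR register class above, while an explicit "{v7}"-style operand goes through the v-register name parsing handled here.

    // The %s template modifier prints the 32-bit (s-register) view of the
    // operand, as described in the constraint table above.
    static float square(float x) {
      float y;
      asm("fmul %s0, %s1, %s1" : "=w"(y) : "w"(x));
      return y;
    }
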
+ Res.first = AArch64::FPR128RegClass.getRegister(RegNo); + Res.second = &AArch64::FPR128RegClass; + } + } + } - // Neon move instr per word, shift zeros - VT = is128Bits ? MVT::v4i32 : MVT::v2i32; - if ((SplatBits & ~0xff) == 0) { - // Value = 0x000000nn is 0x000000nn LSL 0 - // movi: Op=0, Cmode= 0000; mvni: Op=1, Cmode= 0000 - // bic: Op=1, Cmode= 0001; orr: Op=0, Cmode= 0001 - // Op=x, Cmode=000x - Imm = SplatBits; - OpCmode = 0; - break; + return Res; +} + +/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops +/// vector. If it is invalid, don't add anything to Ops. +void AArch64TargetLowering::LowerAsmOperandForConstraint( + SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops, + SelectionDAG &DAG) const { + SDValue Result; + + // Currently only support length 1 constraints. + if (Constraint.length() != 1) + return; + + char ConstraintLetter = Constraint[0]; + switch (ConstraintLetter) { + default: + break; + + // This set of constraints deal with valid constants for various instructions. + // Validate and return a target constant for them if we can. + case 'z': { + // 'z' maps to xzr or wzr so it needs an input of 0. + ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); + if (!C || C->getZExtValue() != 0) + return; + + if (Op.getValueType() == MVT::i64) + Result = DAG.getRegister(AArch64::XZR, MVT::i64); + else + Result = DAG.getRegister(AArch64::WZR, MVT::i32); + break; + } + + case 'I': + case 'J': + case 'K': + case 'L': + case 'M': + case 'N': + ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); + if (!C) + return; + + // Grab the value and do some validation. + uint64_t CVal = C->getZExtValue(); + switch (ConstraintLetter) { + // The I constraint applies only to simple ADD or SUB immediate operands: + // i.e. 0 to 4095 with optional shift by 12 + // The J constraint applies only to ADD or SUB immediates that would be + // valid when negated, i.e. if [an add pattern] were to be output as a SUB + // instruction [or vice versa], in other words -1 to -4095 with optional + // left shift by 12. + case 'I': + if (isUInt<12>(CVal) || isShiftedUInt<12, 12>(CVal)) + break; + return; + case 'J': { + uint64_t NVal = -C->getSExtValue(); + if (isUInt<12>(NVal) || isShiftedUInt<12, 12>(NVal)) + break; + return; } - if ((SplatBits & ~0xff00) == 0) { - // Value = 0x0000nn00 is 0x000000nn LSL 8 - // movi: Op=0, Cmode= 0010; mvni: Op=1, Cmode= 0010 - // bic: Op=1, Cmode= 0011; orr : Op=0, Cmode= 0011 - // Op=x, Cmode=001x - Imm = SplatBits >> 8; - OpCmode = 0x2; - break; + // The K and L constraints apply *only* to logical immediates, including + // what used to be the MOVI alias for ORR (though the MOVI alias has now + // been removed and MOV should be used). So these constraints have to + // distinguish between bit patterns that are valid 32-bit or 64-bit + // "bitmask immediates": for example 0xaaaaaaaa is a valid bimm32 (K), but + // not a valid bimm64 (L) where 0xaaaaaaaaaaaaaaaa would be valid, and vice + // versa. + case 'K': + if (AArch64_AM::isLogicalImmediate(CVal, 32)) + break; + return; + case 'L': + if (AArch64_AM::isLogicalImmediate(CVal, 64)) + break; + return; + // The M and N constraints are a superset of K and L respectively, for use + // with the MOV (immediate) alias. As well as the logical immediates they + // also match 32 or 64-bit immediates that can be loaded either using a + // *single* MOVZ or MOVN , such as 32-bit 0x12340000, 0x00001234, 0xffffedca + // (M) or 64-bit 0x1234000000000000 (N) etc. 
+ // As a note some of this code is liberally stolen from the asm parser. + case 'M': { + if (!isUInt<32>(CVal)) + return; + if (AArch64_AM::isLogicalImmediate(CVal, 32)) + break; + if ((CVal & 0xFFFF) == CVal) + break; + if ((CVal & 0xFFFF0000ULL) == CVal) + break; + uint64_t NCVal = ~(uint32_t)CVal; + if ((NCVal & 0xFFFFULL) == NCVal) + break; + if ((NCVal & 0xFFFF0000ULL) == NCVal) + break; + return; } - if ((SplatBits & ~0xff0000) == 0) { - // Value = 0x00nn0000 is 0x000000nn LSL 16 - // movi: Op=0, Cmode= 0100; mvni: Op=1, Cmode= 0100 - // bic: Op=1, Cmode= 0101; orr: Op=0, Cmode= 0101 - // Op=x, Cmode=010x - Imm = SplatBits >> 16; - OpCmode = 0x4; - break; + case 'N': { + if (AArch64_AM::isLogicalImmediate(CVal, 64)) + break; + if ((CVal & 0xFFFFULL) == CVal) + break; + if ((CVal & 0xFFFF0000ULL) == CVal) + break; + if ((CVal & 0xFFFF00000000ULL) == CVal) + break; + if ((CVal & 0xFFFF000000000000ULL) == CVal) + break; + uint64_t NCVal = ~CVal; + if ((NCVal & 0xFFFFULL) == NCVal) + break; + if ((NCVal & 0xFFFF0000ULL) == NCVal) + break; + if ((NCVal & 0xFFFF00000000ULL) == NCVal) + break; + if ((NCVal & 0xFFFF000000000000ULL) == NCVal) + break; + return; } - if ((SplatBits & ~0xff000000) == 0) { - // Value = 0xnn000000 is 0x000000nn LSL 24 - // movi: Op=0, Cmode= 0110; mvni: Op=1, Cmode= 0110 - // bic: Op=1, Cmode= 0111; orr: Op=0, Cmode= 0111 - // Op=x, Cmode=011x - Imm = SplatBits >> 24; - OpCmode = 0x6; - break; + default: + return; } - // Now the MSL immediates. + // All assembler immediates are 64-bit integers. + Result = DAG.getTargetConstant(CVal, MVT::i64); + break; + } - // Neon move instr per word, shift ones - if ((SplatBits & ~0xffff) == 0 && - ((SplatBits | SplatUndef) & 0xff) == 0xff) { - // Value = 0x0000nnff is 0x000000nn MSL 8 - // movi: Op=0, Cmode= 1100; mvni: Op=1, Cmode= 1100 - // Op=x, Cmode=1100 - Imm = SplatBits >> 8; - OpCmode = 0xc; - break; + if (Result.getNode()) { + Ops.push_back(Result); + return; + } + + return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); +} + +//===----------------------------------------------------------------------===// +// AArch64 Advanced SIMD Support +//===----------------------------------------------------------------------===// + +/// WidenVector - Given a value in the V64 register class, produce the +/// equivalent value in the V128 register class. +static SDValue WidenVector(SDValue V64Reg, SelectionDAG &DAG) { + EVT VT = V64Reg.getValueType(); + unsigned NarrowSize = VT.getVectorNumElements(); + MVT EltTy = VT.getVectorElementType().getSimpleVT(); + MVT WideTy = MVT::getVectorVT(EltTy, 2 * NarrowSize); + SDLoc DL(V64Reg); + + return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, WideTy, DAG.getUNDEF(WideTy), + V64Reg, DAG.getConstant(0, MVT::i32)); +} + +/// getExtFactor - Determine the adjustment factor for the position when +/// generating an "extract from vector registers" instruction. +static unsigned getExtFactor(SDValue &V) { + EVT EltType = V.getValueType().getVectorElementType(); + return EltType.getSizeInBits() / 8; +} + +/// NarrowVector - Given a value in the V128 register class, produce the +/// equivalent value in the V64 register class. 
+static SDValue NarrowVector(SDValue V128Reg, SelectionDAG &DAG) { + EVT VT = V128Reg.getValueType(); + unsigned WideSize = VT.getVectorNumElements(); + MVT EltTy = VT.getVectorElementType().getSimpleVT(); + MVT NarrowTy = MVT::getVectorVT(EltTy, WideSize / 2); + SDLoc DL(V128Reg); + + return DAG.getTargetExtractSubreg(AArch64::dsub, DL, NarrowTy, V128Reg); +} + +// Gather data to see if the operation can be modelled as a +// shuffle in combination with VEXTs. +SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op, + SelectionDAG &DAG) const { + SDLoc dl(Op); + EVT VT = Op.getValueType(); + unsigned NumElts = VT.getVectorNumElements(); + + SmallVector<SDValue, 2> SourceVecs; + SmallVector<unsigned, 2> MinElts; + SmallVector<unsigned, 2> MaxElts; + + for (unsigned i = 0; i < NumElts; ++i) { + SDValue V = Op.getOperand(i); + if (V.getOpcode() == ISD::UNDEF) + continue; + else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) { + // A shuffle can only come from building a vector from various + // elements of other vectors. + return SDValue(); } - if ((SplatBits & ~0xffffff) == 0 && - ((SplatBits | SplatUndef) & 0xffff) == 0xffff) { - // Value = 0x00nnffff is 0x000000nn MSL 16 - // movi: Op=1, Cmode= 1101; mvni: Op=1, Cmode= 1101 - // Op=x, Cmode=1101 - Imm = SplatBits >> 16; - OpCmode = 0xd; - break; + + // Record this extraction against the appropriate vector if possible... + SDValue SourceVec = V.getOperand(0); + unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue(); + bool FoundSource = false; + for (unsigned j = 0; j < SourceVecs.size(); ++j) { + if (SourceVecs[j] == SourceVec) { + if (MinElts[j] > EltNo) + MinElts[j] = EltNo; + if (MaxElts[j] < EltNo) + MaxElts[j] = EltNo; + FoundSource = true; + break; + } + } + + // Or record a new source if not... + if (!FoundSource) { + SourceVecs.push_back(SourceVec); + MinElts.push_back(EltNo); + MaxElts.push_back(EltNo); + } + } + + // Currently only do something sane when at most two source vectors + // involved. + if (SourceVecs.size() > 2) + return SDValue(); + + SDValue ShuffleSrcs[2] = { DAG.getUNDEF(VT), DAG.getUNDEF(VT) }; + int VEXTOffsets[2] = { 0, 0 }; + + // This loop extracts the usage patterns of the source vectors + // and prepares appropriate SDValues for a shuffle if possible. + for (unsigned i = 0; i < SourceVecs.size(); ++i) { + if (SourceVecs[i].getValueType() == VT) { + // No VEXT necessary + ShuffleSrcs[i] = SourceVecs[i]; + VEXTOffsets[i] = 0; + continue; + } else if (SourceVecs[i].getValueType().getVectorNumElements() < NumElts) { + // We can pad out the smaller vector for free, so if it's part of a + // shuffle... + ShuffleSrcs[i] = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, SourceVecs[i], + DAG.getUNDEF(SourceVecs[i].getValueType())); + continue; + } + + // Don't attempt to extract subvectors from BUILD_VECTOR sources + // that expand or trunc the original value. + // TODO: We can try to bitcast and ANY_EXTEND the result but + // we need to consider the cost of vector ANY_EXTEND, and the + // legality of all the types. + if (SourceVecs[i].getValueType().getVectorElementType() != + VT.getVectorElementType()) + return SDValue(); + + // Since only 64-bit and 128-bit vectors are legal on ARM and + // we've eliminated the other cases... 
+ assert(SourceVecs[i].getValueType().getVectorNumElements() == 2 * NumElts && + "unexpected vector sizes in ReconstructShuffle"); + + if (MaxElts[i] - MinElts[i] >= NumElts) { + // Span too large for a VEXT to cope + return SDValue(); + } + + if (MinElts[i] >= NumElts) { + // The extraction can just take the second half + VEXTOffsets[i] = NumElts; + ShuffleSrcs[i] = + DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, SourceVecs[i], + DAG.getIntPtrConstant(NumElts)); + } else if (MaxElts[i] < NumElts) { + // The extraction can just take the first half + VEXTOffsets[i] = 0; + ShuffleSrcs[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, + SourceVecs[i], DAG.getIntPtrConstant(0)); + } else { + // An actual VEXT is needed + VEXTOffsets[i] = MinElts[i]; + SDValue VEXTSrc1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, + SourceVecs[i], DAG.getIntPtrConstant(0)); + SDValue VEXTSrc2 = + DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, SourceVecs[i], + DAG.getIntPtrConstant(NumElts)); + unsigned Imm = VEXTOffsets[i] * getExtFactor(VEXTSrc1); + ShuffleSrcs[i] = DAG.getNode(AArch64ISD::EXT, dl, VT, VEXTSrc1, VEXTSrc2, + DAG.getConstant(Imm, MVT::i32)); + } + } + + SmallVector<int, 8> Mask; + + for (unsigned i = 0; i < NumElts; ++i) { + SDValue Entry = Op.getOperand(i); + if (Entry.getOpcode() == ISD::UNDEF) { + Mask.push_back(-1); + continue; + } + + SDValue ExtractVec = Entry.getOperand(0); + int ExtractElt = + cast<ConstantSDNode>(Op.getOperand(i).getOperand(1))->getSExtValue(); + if (ExtractVec == SourceVecs[0]) { + Mask.push_back(ExtractElt - VEXTOffsets[0]); + } else { + Mask.push_back(ExtractElt + NumElts - VEXTOffsets[1]); } - // can't handle any other + } + + // Final check before we try to produce nonsense... + if (isShuffleMaskLegal(Mask, VT)) + return DAG.getVectorShuffle(VT, dl, ShuffleSrcs[0], ShuffleSrcs[1], + &Mask[0]); + + return SDValue(); +} + +// check if an EXT instruction can handle the shuffle mask when the +// vector sources of the shuffle are the same. +static bool isSingletonEXTMask(ArrayRef<int> M, EVT VT, unsigned &Imm) { + unsigned NumElts = VT.getVectorNumElements(); + + // Assume that the first shuffle index is not UNDEF. Fail if it is. + if (M[0] < 0) + return false; + + Imm = M[0]; + + // If this is a VEXT shuffle, the immediate value is the index of the first + // element. The other shuffle indices must be the successive elements after + // the first one. + unsigned ExpectedElt = Imm; + for (unsigned i = 1; i < NumElts; ++i) { + // Increment the expected index. If it wraps around, just follow it + // back to index zero and keep going. + ++ExpectedElt; + if (ExpectedElt == NumElts) + ExpectedElt = 0; + + if (M[i] < 0) + continue; // ignore UNDEF indices + if (ExpectedElt != static_cast<unsigned>(M[i])) + return false; + } + + return true; +} + +// check if an EXT instruction can handle the shuffle mask when the +// vector sources of the shuffle are different. +static bool isEXTMask(ArrayRef<int> M, EVT VT, bool &ReverseEXT, + unsigned &Imm) { + // Look for the first non-undef element. + const int *FirstRealElt = std::find_if(M.begin(), M.end(), + [](int Elt) {return Elt >= 0;}); + + // Benefit form APInt to handle overflow when calculating expected element. + unsigned NumElts = VT.getVectorNumElements(); + unsigned MaskBits = APInt(32, NumElts * 2).logBase2(); + APInt ExpectedElt = APInt(MaskBits, *FirstRealElt + 1); + // The following shuffle indices must be the successive elements after the + // first real element. 
+ const int *FirstWrongElt = std::find_if(FirstRealElt + 1, M.end(), + [&](int Elt) {return Elt != ExpectedElt++ && Elt != -1;}); + if (FirstWrongElt != M.end()) + return false; + + // The index of an EXT is the first element if it is not UNDEF. + // Watch out for the beginning UNDEFs. The EXT index should be the expected + // value of the first element. E.g. + // <-1, -1, 3, ...> is treated as <1, 2, 3, ...>. + // <-1, -1, 0, 1, ...> is treated as <2*NumElts-2, 2*NumElts-1, 0, 1, ...>. + // ExpectedElt is the last mask index plus 1. + Imm = ExpectedElt.getZExtValue(); + + // There are two difference cases requiring to reverse input vectors. + // For example, for vector <4 x i32> we have the following cases, + // Case 1: shufflevector(<4 x i32>,<4 x i32>,<-1, -1, -1, 0>) + // Case 2: shufflevector(<4 x i32>,<4 x i32>,<-1, -1, 7, 0>) + // For both cases, we finally use mask <5, 6, 7, 0>, which requires + // to reverse two input vectors. + if (Imm < NumElts) + ReverseEXT = true; + else + Imm -= NumElts; + + return true; +} + +/// isREVMask - Check if a vector shuffle corresponds to a REV +/// instruction with the specified blocksize. (The order of the elements +/// within each block of the vector is reversed.) +static bool isREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) { + assert((BlockSize == 16 || BlockSize == 32 || BlockSize == 64) && + "Only possible block sizes for REV are: 16, 32, 64"); + + unsigned EltSz = VT.getVectorElementType().getSizeInBits(); + if (EltSz == 64) + return false; + + unsigned NumElts = VT.getVectorNumElements(); + unsigned BlockElts = M[0] + 1; + // If the first shuffle index is UNDEF, be optimistic. + if (M[0] < 0) + BlockElts = BlockSize / EltSz; + + if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz) return false; + + for (unsigned i = 0; i < NumElts; ++i) { + if (M[i] < 0) + continue; // ignore UNDEF indices + if ((unsigned)M[i] != (i - i % BlockElts) + (BlockElts - 1 - i % BlockElts)) + return false; + } + + return true; +} + +static bool isZIPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { + unsigned NumElts = VT.getVectorNumElements(); + WhichResult = (M[0] == 0 ? 0 : 1); + unsigned Idx = WhichResult * NumElts / 2; + for (unsigned i = 0; i != NumElts; i += 2) { + if ((M[i] >= 0 && (unsigned)M[i] != Idx) || + (M[i + 1] >= 0 && (unsigned)M[i + 1] != Idx + NumElts)) + return false; + Idx += 1; + } + + return true; +} + +static bool isUZPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { + unsigned NumElts = VT.getVectorNumElements(); + WhichResult = (M[0] == 0 ? 0 : 1); + for (unsigned i = 0; i != NumElts; ++i) { + if (M[i] < 0) + continue; // ignore UNDEF indices + if ((unsigned)M[i] != 2 * i + WhichResult) + return false; + } + + return true; +} + +static bool isTRNMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { + unsigned NumElts = VT.getVectorNumElements(); + WhichResult = (M[0] == 0 ? 0 : 1); + for (unsigned i = 0; i < NumElts; i += 2) { + if ((M[i] >= 0 && (unsigned)M[i] != i + WhichResult) || + (M[i + 1] >= 0 && (unsigned)M[i + 1] != i + NumElts + WhichResult)) + return false; } + return true; +} - case 64: { - if (type != Neon_Mov_Imm) +/// isZIP_v_undef_Mask - Special case of isZIPMask for canonical form of +/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". +/// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>. +static bool isZIP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { + unsigned NumElts = VT.getVectorNumElements(); + WhichResult = (M[0] == 0 ? 
0 : 1); + unsigned Idx = WhichResult * NumElts / 2; + for (unsigned i = 0; i != NumElts; i += 2) { + if ((M[i] >= 0 && (unsigned)M[i] != Idx) || + (M[i + 1] >= 0 && (unsigned)M[i + 1] != Idx)) return false; - // Neon move instr bytemask, where each byte is either 0x00 or 0xff. - // movi Op=1, Cmode=1110. - OpCmode = 0x1e; - uint64_t BitMask = 0xff; - uint64_t Val = 0; - unsigned ImmMask = 1; - Imm = 0; - for (int ByteNum = 0; ByteNum < 8; ++ByteNum) { - if (((SplatBits | SplatUndef) & BitMask) == BitMask) { - Val |= BitMask; - Imm |= ImmMask; - } else if ((SplatBits & BitMask) != 0) { + Idx += 1; + } + + return true; +} + +/// isUZP_v_undef_Mask - Special case of isUZPMask for canonical form of +/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". +/// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>, +static bool isUZP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { + unsigned Half = VT.getVectorNumElements() / 2; + WhichResult = (M[0] == 0 ? 0 : 1); + for (unsigned j = 0; j != 2; ++j) { + unsigned Idx = WhichResult; + for (unsigned i = 0; i != Half; ++i) { + int MIdx = M[i + j * Half]; + if (MIdx >= 0 && (unsigned)MIdx != Idx) return false; - } - BitMask <<= 8; - ImmMask <<= 1; + Idx += 2; } - SplatBits = Val; - VT = is128Bits ? MVT::v2i64 : MVT::v1i64; - break; - } } return true; } -static SDValue PerformANDCombine(SDNode *N, - TargetLowering::DAGCombinerInfo &DCI) { +/// isTRN_v_undef_Mask - Special case of isTRNMask for canonical form of +/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". +/// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>. +static bool isTRN_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { + unsigned NumElts = VT.getVectorNumElements(); + WhichResult = (M[0] == 0 ? 0 : 1); + for (unsigned i = 0; i < NumElts; i += 2) { + if ((M[i] >= 0 && (unsigned)M[i] != i + WhichResult) || + (M[i + 1] >= 0 && (unsigned)M[i + 1] != i + WhichResult)) + return false; + } + return true; +} - SelectionDAG &DAG = DCI.DAG; - SDLoc DL(N); - EVT VT = N->getValueType(0); +static bool isINSMask(ArrayRef<int> M, int NumInputElements, + bool &DstIsLeft, int &Anomaly) { + if (M.size() != static_cast<size_t>(NumInputElements)) + return false; - // We're looking for an SRA/SHL pair which form an SBFX. 
+ int NumLHSMatch = 0, NumRHSMatch = 0; + int LastLHSMismatch = -1, LastRHSMismatch = -1; - if (VT != MVT::i32 && VT != MVT::i64) - return SDValue(); + for (int i = 0; i < NumInputElements; ++i) { + if (M[i] == -1) { + ++NumLHSMatch; + ++NumRHSMatch; + continue; + } - if (!isa<ConstantSDNode>(N->getOperand(1))) - return SDValue(); + if (M[i] == i) + ++NumLHSMatch; + else + LastLHSMismatch = i; - uint64_t TruncMask = N->getConstantOperandVal(1); - if (!isMask_64(TruncMask)) - return SDValue(); + if (M[i] == i + NumInputElements) + ++NumRHSMatch; + else + LastRHSMismatch = i; + } - uint64_t Width = CountPopulation_64(TruncMask); - SDValue Shift = N->getOperand(0); + if (NumLHSMatch == NumInputElements - 1) { + DstIsLeft = true; + Anomaly = LastLHSMismatch; + return true; + } else if (NumRHSMatch == NumInputElements - 1) { + DstIsLeft = false; + Anomaly = LastRHSMismatch; + return true; + } - if (Shift.getOpcode() != ISD::SRL) - return SDValue(); + return false; +} - if (!isa<ConstantSDNode>(Shift->getOperand(1))) +static bool isConcatMask(ArrayRef<int> Mask, EVT VT, bool SplitLHS) { + if (VT.getSizeInBits() != 128) + return false; + + unsigned NumElts = VT.getVectorNumElements(); + + for (int I = 0, E = NumElts / 2; I != E; I++) { + if (Mask[I] != I) + return false; + } + + int Offset = NumElts / 2; + for (int I = NumElts / 2, E = NumElts; I != E; I++) { + if (Mask[I] != I + SplitLHS * Offset) + return false; + } + + return true; +} + +static SDValue tryFormConcatFromShuffle(SDValue Op, SelectionDAG &DAG) { + SDLoc DL(Op); + EVT VT = Op.getValueType(); + SDValue V0 = Op.getOperand(0); + SDValue V1 = Op.getOperand(1); + ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op)->getMask(); + + if (VT.getVectorElementType() != V0.getValueType().getVectorElementType() || + VT.getVectorElementType() != V1.getValueType().getVectorElementType()) return SDValue(); - uint64_t LSB = Shift->getConstantOperandVal(1); - if (LSB > VT.getSizeInBits() || Width > VT.getSizeInBits()) + bool SplitV0 = V0.getValueType().getSizeInBits() == 128; + + if (!isConcatMask(Mask, VT, SplitV0)) return SDValue(); - return DAG.getNode(AArch64ISD::UBFX, DL, VT, Shift.getOperand(0), - DAG.getConstant(LSB, MVT::i64), - DAG.getConstant(LSB + Width - 1, MVT::i64)); -} - -/// For a true bitfield insert, the bits getting into that contiguous mask -/// should come from the low part of an existing value: they must be formed from -/// a compatible SHL operation (unless they're already low). This function -/// checks that condition and returns the least-significant bit that's -/// intended. If the operation not a field preparation, -1 is returned. -static int32_t getLSBForBFI(SelectionDAG &DAG, SDLoc DL, EVT VT, - SDValue &MaskedVal, uint64_t Mask) { - if (!isShiftedMask_64(Mask)) - return -1; - - // Now we need to alter MaskedVal so that it is an appropriate input for a BFI - // instruction. BFI will do a left-shift by LSB before applying the mask we've - // spotted, so in general we should pre-emptively "undo" that by making sure - // the incoming bits have had a right-shift applied to them. - // - // This right shift, however, will combine with existing left/right shifts. In - // the simplest case of a completely straight bitfield operation, it will be - // expected to completely cancel out with an existing SHL. More complicated - // cases (e.g. bitfield to bitfield copy) may still need a real shift before - // the BFI. 
- - uint64_t LSB = countTrailingZeros(Mask); - int64_t ShiftRightRequired = LSB; - if (MaskedVal.getOpcode() == ISD::SHL && - isa<ConstantSDNode>(MaskedVal.getOperand(1))) { - ShiftRightRequired -= MaskedVal.getConstantOperandVal(1); - MaskedVal = MaskedVal.getOperand(0); - } else if (MaskedVal.getOpcode() == ISD::SRL && - isa<ConstantSDNode>(MaskedVal.getOperand(1))) { - ShiftRightRequired += MaskedVal.getConstantOperandVal(1); - MaskedVal = MaskedVal.getOperand(0); - } - - if (ShiftRightRequired > 0) - MaskedVal = DAG.getNode(ISD::SRL, DL, VT, MaskedVal, - DAG.getConstant(ShiftRightRequired, MVT::i64)); - else if (ShiftRightRequired < 0) { - // We could actually end up with a residual left shift, for example with - // "struc.bitfield = val << 1". - MaskedVal = DAG.getNode(ISD::SHL, DL, VT, MaskedVal, - DAG.getConstant(-ShiftRightRequired, MVT::i64)); - } - - return LSB; -} - -/// Searches from N for an existing AArch64ISD::BFI node, possibly surrounded by -/// a mask and an extension. Returns true if a BFI was found and provides -/// information on its surroundings. -static bool findMaskedBFI(SDValue N, SDValue &BFI, uint64_t &Mask, - bool &Extended) { - Extended = false; - if (N.getOpcode() == ISD::ZERO_EXTEND) { - Extended = true; - N = N.getOperand(0); - } - - if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1))) { - Mask = N->getConstantOperandVal(1); - N = N.getOperand(0); + EVT CastVT = EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(), + VT.getVectorNumElements() / 2); + if (SplitV0) { + V0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, CastVT, V0, + DAG.getConstant(0, MVT::i64)); + } + if (V1.getValueType().getSizeInBits() == 128) { + V1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, CastVT, V1, + DAG.getConstant(0, MVT::i64)); + } + return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, V0, V1); +} + +/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit +/// the specified operations to build the shuffle. +static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, + SDValue RHS, SelectionDAG &DAG, + SDLoc dl) { + unsigned OpNum = (PFEntry >> 26) & 0x0F; + unsigned LHSID = (PFEntry >> 13) & ((1 << 13) - 1); + unsigned RHSID = (PFEntry >> 0) & ((1 << 13) - 1); + + enum { + OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> + OP_VREV, + OP_VDUP0, + OP_VDUP1, + OP_VDUP2, + OP_VDUP3, + OP_VEXT1, + OP_VEXT2, + OP_VEXT3, + OP_VUZPL, // VUZP, left result + OP_VUZPR, // VUZP, right result + OP_VZIPL, // VZIP, left result + OP_VZIPR, // VZIP, right result + OP_VTRNL, // VTRN, left result + OP_VTRNR // VTRN, right result + }; + + if (OpNum == OP_COPY) { + if (LHSID == (1 * 9 + 2) * 9 + 3) + return LHS; + assert(LHSID == ((4 * 9 + 5) * 9 + 6) * 9 + 7 && "Illegal OP_COPY!"); + return RHS; + } + + SDValue OpLHS, OpRHS; + OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); + OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); + EVT VT = OpLHS.getValueType(); + + switch (OpNum) { + default: + llvm_unreachable("Unknown shuffle opcode!"); + case OP_VREV: + // VREV divides the vector in half and swaps within the half. 
+ if (VT.getVectorElementType() == MVT::i32 || + VT.getVectorElementType() == MVT::f32) + return DAG.getNode(AArch64ISD::REV64, dl, VT, OpLHS); + // vrev <4 x i16> -> REV32 + if (VT.getVectorElementType() == MVT::i16) + return DAG.getNode(AArch64ISD::REV32, dl, VT, OpLHS); + // vrev <4 x i8> -> REV16 + assert(VT.getVectorElementType() == MVT::i8); + return DAG.getNode(AArch64ISD::REV16, dl, VT, OpLHS); + case OP_VDUP0: + case OP_VDUP1: + case OP_VDUP2: + case OP_VDUP3: { + EVT EltTy = VT.getVectorElementType(); + unsigned Opcode; + if (EltTy == MVT::i8) + Opcode = AArch64ISD::DUPLANE8; + else if (EltTy == MVT::i16) + Opcode = AArch64ISD::DUPLANE16; + else if (EltTy == MVT::i32 || EltTy == MVT::f32) + Opcode = AArch64ISD::DUPLANE32; + else if (EltTy == MVT::i64 || EltTy == MVT::f64) + Opcode = AArch64ISD::DUPLANE64; + else + llvm_unreachable("Invalid vector element type?"); + + if (VT.getSizeInBits() == 64) + OpLHS = WidenVector(OpLHS, DAG); + SDValue Lane = DAG.getConstant(OpNum - OP_VDUP0, MVT::i64); + return DAG.getNode(Opcode, dl, VT, OpLHS, Lane); + } + case OP_VEXT1: + case OP_VEXT2: + case OP_VEXT3: { + unsigned Imm = (OpNum - OP_VEXT1 + 1) * getExtFactor(OpLHS); + return DAG.getNode(AArch64ISD::EXT, dl, VT, OpLHS, OpRHS, + DAG.getConstant(Imm, MVT::i32)); + } + case OP_VUZPL: + return DAG.getNode(AArch64ISD::UZP1, dl, DAG.getVTList(VT, VT), OpLHS, + OpRHS); + case OP_VUZPR: + return DAG.getNode(AArch64ISD::UZP2, dl, DAG.getVTList(VT, VT), OpLHS, + OpRHS); + case OP_VZIPL: + return DAG.getNode(AArch64ISD::ZIP1, dl, DAG.getVTList(VT, VT), OpLHS, + OpRHS); + case OP_VZIPR: + return DAG.getNode(AArch64ISD::ZIP2, dl, DAG.getVTList(VT, VT), OpLHS, + OpRHS); + case OP_VTRNL: + return DAG.getNode(AArch64ISD::TRN1, dl, DAG.getVTList(VT, VT), OpLHS, + OpRHS); + case OP_VTRNR: + return DAG.getNode(AArch64ISD::TRN2, dl, DAG.getVTList(VT, VT), OpLHS, + OpRHS); + } +} + +static SDValue GenerateTBL(SDValue Op, ArrayRef<int> ShuffleMask, + SelectionDAG &DAG) { + // Check to see if we can use the TBL instruction. + SDValue V1 = Op.getOperand(0); + SDValue V2 = Op.getOperand(1); + SDLoc DL(Op); + + EVT EltVT = Op.getValueType().getVectorElementType(); + unsigned BytesPerElt = EltVT.getSizeInBits() / 8; + + SmallVector<SDValue, 8> TBLMask; + for (int Val : ShuffleMask) { + for (unsigned Byte = 0; Byte < BytesPerElt; ++Byte) { + unsigned Offset = Byte + Val * BytesPerElt; + TBLMask.push_back(DAG.getConstant(Offset, MVT::i32)); + } + } + + MVT IndexVT = MVT::v8i8; + unsigned IndexLen = 8; + if (Op.getValueType().getSizeInBits() == 128) { + IndexVT = MVT::v16i8; + IndexLen = 16; + } + + SDValue V1Cst = DAG.getNode(ISD::BITCAST, DL, IndexVT, V1); + SDValue V2Cst = DAG.getNode(ISD::BITCAST, DL, IndexVT, V2); + + SDValue Shuffle; + if (V2.getNode()->getOpcode() == ISD::UNDEF) { + if (IndexLen == 8) + V1Cst = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v16i8, V1Cst, V1Cst); + Shuffle = DAG.getNode( + ISD::INTRINSIC_WO_CHAIN, DL, IndexVT, + DAG.getConstant(Intrinsic::aarch64_neon_tbl1, MVT::i32), V1Cst, + DAG.getNode(ISD::BUILD_VECTOR, DL, IndexVT, + makeArrayRef(TBLMask.data(), IndexLen))); } else { - // Mask is the whole width. 
- Mask = -1ULL >> (64 - N.getValueType().getSizeInBits()); + if (IndexLen == 8) { + V1Cst = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v16i8, V1Cst, V2Cst); + Shuffle = DAG.getNode( + ISD::INTRINSIC_WO_CHAIN, DL, IndexVT, + DAG.getConstant(Intrinsic::aarch64_neon_tbl1, MVT::i32), V1Cst, + DAG.getNode(ISD::BUILD_VECTOR, DL, IndexVT, + makeArrayRef(TBLMask.data(), IndexLen))); + } else { + // FIXME: We cannot, for the moment, emit a TBL2 instruction because we + // cannot currently represent the register constraints on the input + // table registers. + // Shuffle = DAG.getNode(AArch64ISD::TBL2, DL, IndexVT, V1Cst, V2Cst, + // DAG.getNode(ISD::BUILD_VECTOR, DL, IndexVT, + // &TBLMask[0], IndexLen)); + Shuffle = DAG.getNode( + ISD::INTRINSIC_WO_CHAIN, DL, IndexVT, + DAG.getConstant(Intrinsic::aarch64_neon_tbl2, MVT::i32), V1Cst, V2Cst, + DAG.getNode(ISD::BUILD_VECTOR, DL, IndexVT, + makeArrayRef(TBLMask.data(), IndexLen))); + } + } + return DAG.getNode(ISD::BITCAST, DL, Op.getValueType(), Shuffle); +} + +static unsigned getDUPLANEOp(EVT EltType) { + if (EltType == MVT::i8) + return AArch64ISD::DUPLANE8; + if (EltType == MVT::i16) + return AArch64ISD::DUPLANE16; + if (EltType == MVT::i32 || EltType == MVT::f32) + return AArch64ISD::DUPLANE32; + if (EltType == MVT::i64 || EltType == MVT::f64) + return AArch64ISD::DUPLANE64; + + llvm_unreachable("Invalid vector element type?"); +} + +SDValue AArch64TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, + SelectionDAG &DAG) const { + SDLoc dl(Op); + EVT VT = Op.getValueType(); + + ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode()); + + // Convert shuffles that are directly supported on NEON to target-specific + // DAG nodes, instead of keeping them as shuffles and matching them again + // during code selection. This is more efficient and avoids the possibility + // of inconsistencies between legalization and selection. + ArrayRef<int> ShuffleMask = SVN->getMask(); + + SDValue V1 = Op.getOperand(0); + SDValue V2 = Op.getOperand(1); + + if (ShuffleVectorSDNode::isSplatMask(&ShuffleMask[0], + V1.getValueType().getSimpleVT())) { + int Lane = SVN->getSplatIndex(); + // If this is undef splat, generate it via "just" vdup, if possible. + if (Lane == -1) + Lane = 0; + + if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) + return DAG.getNode(AArch64ISD::DUP, dl, V1.getValueType(), + V1.getOperand(0)); + // Test if V1 is a BUILD_VECTOR and the lane being referenced is a non- + // constant. If so, we can just reference the lane's definition directly. + if (V1.getOpcode() == ISD::BUILD_VECTOR && + !isa<ConstantSDNode>(V1.getOperand(Lane))) + return DAG.getNode(AArch64ISD::DUP, dl, VT, V1.getOperand(Lane)); + + // Otherwise, duplicate from the lane of the input vector. + unsigned Opcode = getDUPLANEOp(V1.getValueType().getVectorElementType()); + + // SelectionDAGBuilder may have "helpfully" already extracted or conatenated + // to make a vector of the same size as this SHUFFLE. We can ignore the + // extract entirely, and canonicalise the concat using WidenVector. 
+ if (V1.getOpcode() == ISD::EXTRACT_SUBVECTOR) { + Lane += cast<ConstantSDNode>(V1.getOperand(1))->getZExtValue(); + V1 = V1.getOperand(0); + } else if (V1.getOpcode() == ISD::CONCAT_VECTORS) { + unsigned Idx = Lane >= (int)VT.getVectorNumElements() / 2; + Lane -= Idx * VT.getVectorNumElements() / 2; + V1 = WidenVector(V1.getOperand(Idx), DAG); + } else if (VT.getSizeInBits() == 64) + V1 = WidenVector(V1, DAG); + + return DAG.getNode(Opcode, dl, VT, V1, DAG.getConstant(Lane, MVT::i64)); + } + + if (isREVMask(ShuffleMask, VT, 64)) + return DAG.getNode(AArch64ISD::REV64, dl, V1.getValueType(), V1, V2); + if (isREVMask(ShuffleMask, VT, 32)) + return DAG.getNode(AArch64ISD::REV32, dl, V1.getValueType(), V1, V2); + if (isREVMask(ShuffleMask, VT, 16)) + return DAG.getNode(AArch64ISD::REV16, dl, V1.getValueType(), V1, V2); + + bool ReverseEXT = false; + unsigned Imm; + if (isEXTMask(ShuffleMask, VT, ReverseEXT, Imm)) { + if (ReverseEXT) + std::swap(V1, V2); + Imm *= getExtFactor(V1); + return DAG.getNode(AArch64ISD::EXT, dl, V1.getValueType(), V1, V2, + DAG.getConstant(Imm, MVT::i32)); + } else if (V2->getOpcode() == ISD::UNDEF && + isSingletonEXTMask(ShuffleMask, VT, Imm)) { + Imm *= getExtFactor(V1); + return DAG.getNode(AArch64ISD::EXT, dl, V1.getValueType(), V1, V1, + DAG.getConstant(Imm, MVT::i32)); + } + + unsigned WhichResult; + if (isZIPMask(ShuffleMask, VT, WhichResult)) { + unsigned Opc = (WhichResult == 0) ? AArch64ISD::ZIP1 : AArch64ISD::ZIP2; + return DAG.getNode(Opc, dl, V1.getValueType(), V1, V2); + } + if (isUZPMask(ShuffleMask, VT, WhichResult)) { + unsigned Opc = (WhichResult == 0) ? AArch64ISD::UZP1 : AArch64ISD::UZP2; + return DAG.getNode(Opc, dl, V1.getValueType(), V1, V2); + } + if (isTRNMask(ShuffleMask, VT, WhichResult)) { + unsigned Opc = (WhichResult == 0) ? AArch64ISD::TRN1 : AArch64ISD::TRN2; + return DAG.getNode(Opc, dl, V1.getValueType(), V1, V2); + } + + if (isZIP_v_undef_Mask(ShuffleMask, VT, WhichResult)) { + unsigned Opc = (WhichResult == 0) ? AArch64ISD::ZIP1 : AArch64ISD::ZIP2; + return DAG.getNode(Opc, dl, V1.getValueType(), V1, V1); + } + if (isUZP_v_undef_Mask(ShuffleMask, VT, WhichResult)) { + unsigned Opc = (WhichResult == 0) ? AArch64ISD::UZP1 : AArch64ISD::UZP2; + return DAG.getNode(Opc, dl, V1.getValueType(), V1, V1); + } + if (isTRN_v_undef_Mask(ShuffleMask, VT, WhichResult)) { + unsigned Opc = (WhichResult == 0) ? AArch64ISD::TRN1 : AArch64ISD::TRN2; + return DAG.getNode(Opc, dl, V1.getValueType(), V1, V1); + } + + SDValue Concat = tryFormConcatFromShuffle(Op, DAG); + if (Concat.getNode()) + return Concat; + + bool DstIsLeft; + int Anomaly; + int NumInputElements = V1.getValueType().getVectorNumElements(); + if (isINSMask(ShuffleMask, NumInputElements, DstIsLeft, Anomaly)) { + SDValue DstVec = DstIsLeft ? V1 : V2; + SDValue DstLaneV = DAG.getConstant(Anomaly, MVT::i64); + + SDValue SrcVec = V1; + int SrcLane = ShuffleMask[Anomaly]; + if (SrcLane >= NumInputElements) { + SrcVec = V2; + SrcLane -= VT.getVectorNumElements(); + } + SDValue SrcLaneV = DAG.getConstant(SrcLane, MVT::i64); + + EVT ScalarVT = VT.getVectorElementType(); + if (ScalarVT.getSizeInBits() < 32) + ScalarVT = MVT::i32; + + return DAG.getNode( + ISD::INSERT_VECTOR_ELT, dl, VT, DstVec, + DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ScalarVT, SrcVec, SrcLaneV), + DstLaneV); + } + + // If the shuffle is not directly supported and it has 4 elements, use + // the PerfectShuffle-generated table to synthesize it from other shuffles. 
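
To make the table indexing below concrete (a sketch; PerfectShuffleTable itself comes from AArch64PerfectShuffle.h): each of the four mask entries is mapped to 0-7, with 8 standing for an undef lane, and the four digits are packed as a base-9 number. For example the mask <1, 4, 6, 3> selects entry 1*729 + 4*81 + 6*9 + 3 = 1110, and the top two bits of that entry hold the cost compared against 4 below.

    static unsigned perfectShuffleIndex(const int M[4]) {
      unsigned Idx = 0;
      for (int i = 0; i < 4; ++i)
        Idx = Idx * 9 + (M[i] < 0 ? 8u : (unsigned)M[i]); // 8 == undef lane
      return Idx;
    }
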
+ unsigned NumElts = VT.getVectorNumElements(); + if (NumElts == 4) { + unsigned PFIndexes[4]; + for (unsigned i = 0; i != 4; ++i) { + if (ShuffleMask[i] < 0) + PFIndexes[i] = 8; + else + PFIndexes[i] = ShuffleMask[i]; + } + + // Compute the index in the perfect shuffle table. + unsigned PFTableIndex = PFIndexes[0] * 9 * 9 * 9 + PFIndexes[1] * 9 * 9 + + PFIndexes[2] * 9 + PFIndexes[3]; + unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; + unsigned Cost = (PFEntry >> 30); + + if (Cost <= 4) + return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); } - if (N.getOpcode() == AArch64ISD::BFI) { - BFI = N; + return GenerateTBL(Op, ShuffleMask, DAG); +} + +static bool resolveBuildVector(BuildVectorSDNode *BVN, APInt &CnstBits, + APInt &UndefBits) { + EVT VT = BVN->getValueType(0); + APInt SplatBits, SplatUndef; + unsigned SplatBitSize; + bool HasAnyUndefs; + if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { + unsigned NumSplats = VT.getSizeInBits() / SplatBitSize; + + for (unsigned i = 0; i < NumSplats; ++i) { + CnstBits <<= SplatBitSize; + UndefBits <<= SplatBitSize; + CnstBits |= SplatBits.zextOrTrunc(VT.getSizeInBits()); + UndefBits |= (SplatBits ^ SplatUndef).zextOrTrunc(VT.getSizeInBits()); + } + return true; } return false; } -/// Try to combine a subtree (rooted at an OR) into a "masked BFI" node, which -/// is roughly equivalent to (and (BFI ...), mask). This form is used because it -/// can often be further combined with a larger mask. Ultimately, we want mask -/// to be 2^32-1 or 2^64-1 so the AND can be skipped. -static SDValue tryCombineToBFI(SDNode *N, - TargetLowering::DAGCombinerInfo &DCI, - const AArch64Subtarget *Subtarget) { - SelectionDAG &DAG = DCI.DAG; - SDLoc DL(N); +SDValue AArch64TargetLowering::LowerVectorAND(SDValue Op, + SelectionDAG &DAG) const { + BuildVectorSDNode *BVN = + dyn_cast<BuildVectorSDNode>(Op.getOperand(1).getNode()); + SDValue LHS = Op.getOperand(0); + SDLoc dl(Op); + EVT VT = Op.getValueType(); + + if (!BVN) + return Op; + + APInt CnstBits(VT.getSizeInBits(), 0); + APInt UndefBits(VT.getSizeInBits(), 0); + if (resolveBuildVector(BVN, CnstBits, UndefBits)) { + // We only have BIC vector immediate instruction, which is and-not. + CnstBits = ~CnstBits; + + // We make use of a little bit of goto ickiness in order to avoid having to + // duplicate the immediate matching logic for the undef toggled case. + bool SecondTry = false; + AttemptModImm: + + if (CnstBits.getHiBits(64) == CnstBits.getLoBits(64)) { + CnstBits = CnstBits.zextOrTrunc(64); + uint64_t CnstVal = CnstBits.getZExtValue(); + + if (AArch64_AM::isAdvSIMDModImmType1(CnstVal)) { + CnstVal = AArch64_AM::encodeAdvSIMDModImmType1(CnstVal); + MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32; + SDValue Mov = DAG.getNode(AArch64ISD::BICi, dl, MovTy, LHS, + DAG.getConstant(CnstVal, MVT::i32), + DAG.getConstant(0, MVT::i32)); + return DAG.getNode(ISD::BITCAST, dl, VT, Mov); + } + + if (AArch64_AM::isAdvSIMDModImmType2(CnstVal)) { + CnstVal = AArch64_AM::encodeAdvSIMDModImmType2(CnstVal); + MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32; + SDValue Mov = DAG.getNode(AArch64ISD::BICi, dl, MovTy, LHS, + DAG.getConstant(CnstVal, MVT::i32), + DAG.getConstant(8, MVT::i32)); + return DAG.getNode(ISD::BITCAST, dl, VT, Mov); + } + + if (AArch64_AM::isAdvSIMDModImmType3(CnstVal)) { + CnstVal = AArch64_AM::encodeAdvSIMDModImmType3(CnstVal); + MVT MovTy = (VT.getSizeInBits() == 128) ? 
MVT::v4i32 : MVT::v2i32; + SDValue Mov = DAG.getNode(AArch64ISD::BICi, dl, MovTy, LHS, + DAG.getConstant(CnstVal, MVT::i32), + DAG.getConstant(16, MVT::i32)); + return DAG.getNode(ISD::BITCAST, dl, VT, Mov); + } + + if (AArch64_AM::isAdvSIMDModImmType4(CnstVal)) { + CnstVal = AArch64_AM::encodeAdvSIMDModImmType4(CnstVal); + MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32; + SDValue Mov = DAG.getNode(AArch64ISD::BICi, dl, MovTy, LHS, + DAG.getConstant(CnstVal, MVT::i32), + DAG.getConstant(24, MVT::i32)); + return DAG.getNode(ISD::BITCAST, dl, VT, Mov); + } + + if (AArch64_AM::isAdvSIMDModImmType5(CnstVal)) { + CnstVal = AArch64_AM::encodeAdvSIMDModImmType5(CnstVal); + MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v8i16 : MVT::v4i16; + SDValue Mov = DAG.getNode(AArch64ISD::BICi, dl, MovTy, LHS, + DAG.getConstant(CnstVal, MVT::i32), + DAG.getConstant(0, MVT::i32)); + return DAG.getNode(ISD::BITCAST, dl, VT, Mov); + } + + if (AArch64_AM::isAdvSIMDModImmType6(CnstVal)) { + CnstVal = AArch64_AM::encodeAdvSIMDModImmType6(CnstVal); + MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v8i16 : MVT::v4i16; + SDValue Mov = DAG.getNode(AArch64ISD::BICi, dl, MovTy, LHS, + DAG.getConstant(CnstVal, MVT::i32), + DAG.getConstant(8, MVT::i32)); + return DAG.getNode(ISD::BITCAST, dl, VT, Mov); + } + } + + if (SecondTry) + goto FailedModImm; + SecondTry = true; + CnstBits = ~UndefBits; + goto AttemptModImm; + } + +// We can always fall back to a non-immediate AND. +FailedModImm: + return Op; +} + +// Specialized code to quickly find if PotentialBVec is a BuildVector that +// consists of only the same constant int value, returned in reference arg +// ConstVal +static bool isAllConstantBuildVector(const SDValue &PotentialBVec, + uint64_t &ConstVal) { + BuildVectorSDNode *Bvec = dyn_cast<BuildVectorSDNode>(PotentialBVec); + if (!Bvec) + return false; + ConstantSDNode *FirstElt = dyn_cast<ConstantSDNode>(Bvec->getOperand(0)); + if (!FirstElt) + return false; + EVT VT = Bvec->getValueType(0); + unsigned NumElts = VT.getVectorNumElements(); + for (unsigned i = 1; i < NumElts; ++i) + if (dyn_cast<ConstantSDNode>(Bvec->getOperand(i)) != FirstElt) + return false; + ConstVal = FirstElt->getZExtValue(); + return true; +} + +static unsigned getIntrinsicID(const SDNode *N) { + unsigned Opcode = N->getOpcode(); + switch (Opcode) { + default: + return Intrinsic::not_intrinsic; + case ISD::INTRINSIC_WO_CHAIN: { + unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); + if (IID < Intrinsic::num_intrinsics) + return IID; + return Intrinsic::not_intrinsic; + } + } +} + +// Attempt to form a vector S[LR]I from (or (and X, BvecC1), (lsl Y, C2)), +// to (SLI X, Y, C2), where X and Y have matching vector types, BvecC1 is a +// BUILD_VECTORs with constant element C1, C2 is a constant, and C1 == ~C2. +// Also, logical shift right -> sri, with the same structure. +static SDValue tryLowerToSLI(SDNode *N, SelectionDAG &DAG) { EVT VT = N->getValueType(0); - assert(N->getOpcode() == ISD::OR && "Unexpected root"); + if (!VT.isVector()) + return SDValue(); - // We need the LHS to be (and SOMETHING, MASK). Find out what that mask is or - // abandon the effort. - SDValue LHS = N->getOperand(0); - if (LHS.getOpcode() != ISD::AND) + SDLoc DL(N); + + // Is the first op an AND? 
+ const SDValue And = N->getOperand(0); + if (And.getOpcode() != ISD::AND) return SDValue(); - uint64_t LHSMask; - if (isa<ConstantSDNode>(LHS.getOperand(1))) - LHSMask = LHS->getConstantOperandVal(1); - else + // Is the second op an shl or lshr? + SDValue Shift = N->getOperand(1); + // This will have been turned into: AArch64ISD::VSHL vector, #shift + // or AArch64ISD::VLSHR vector, #shift + unsigned ShiftOpc = Shift.getOpcode(); + if ((ShiftOpc != AArch64ISD::VSHL && ShiftOpc != AArch64ISD::VLSHR)) return SDValue(); + bool IsShiftRight = ShiftOpc == AArch64ISD::VLSHR; - // We also need the RHS to be (and SOMETHING, MASK). Find out what that mask - // is or abandon the effort. - SDValue RHS = N->getOperand(1); - if (RHS.getOpcode() != ISD::AND) + // Is the shift amount constant? + ConstantSDNode *C2node = dyn_cast<ConstantSDNode>(Shift.getOperand(1)); + if (!C2node) return SDValue(); - uint64_t RHSMask; - if (isa<ConstantSDNode>(RHS.getOperand(1))) - RHSMask = RHS->getConstantOperandVal(1); - else + // Is the and mask vector all constant? + uint64_t C1; + if (!isAllConstantBuildVector(And.getOperand(1), C1)) return SDValue(); - // Can't do anything if the masks are incompatible. - if (LHSMask & RHSMask) + // Is C1 == ~C2, taking into account how much one can shift elements of a + // particular size? + uint64_t C2 = C2node->getZExtValue(); + unsigned ElemSizeInBits = VT.getVectorElementType().getSizeInBits(); + if (C2 > ElemSizeInBits) + return SDValue(); + unsigned ElemMask = (1 << ElemSizeInBits) - 1; + if ((C1 & ElemMask) != (~C2 & ElemMask)) return SDValue(); - // Now we need one of the masks to be a contiguous field. Without loss of - // generality that should be the RHS one. - SDValue Bitfield = LHS.getOperand(0); - if (getLSBForBFI(DAG, DL, VT, Bitfield, LHSMask) != -1) { - // We know that LHS is a candidate new value, and RHS isn't already a better - // one. - std::swap(LHS, RHS); - std::swap(LHSMask, RHSMask); + SDValue X = And.getOperand(0); + SDValue Y = Shift.getOperand(0); + + unsigned Intrin = + IsShiftRight ? Intrinsic::aarch64_neon_vsri : Intrinsic::aarch64_neon_vsli; + SDValue ResultSLI = + DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT, + DAG.getConstant(Intrin, MVT::i32), X, Y, Shift.getOperand(1)); + + DEBUG(dbgs() << "aarch64-lower: transformed: \n"); + DEBUG(N->dump(&DAG)); + DEBUG(dbgs() << "into: \n"); + DEBUG(ResultSLI->dump(&DAG)); + + ++NumShiftInserts; + return ResultSLI; +} + +SDValue AArch64TargetLowering::LowerVectorOR(SDValue Op, + SelectionDAG &DAG) const { + // Attempt to form a vector S[LR]I from (or (and X, C1), (lsl Y, C2)) + if (EnableAArch64SlrGeneration) { + SDValue Res = tryLowerToSLI(Op.getNode(), DAG); + if (Res.getNode()) + return Res; + } + + BuildVectorSDNode *BVN = + dyn_cast<BuildVectorSDNode>(Op.getOperand(0).getNode()); + SDValue LHS = Op.getOperand(1); + SDLoc dl(Op); + EVT VT = Op.getValueType(); + + // OR commutes, so try swapping the operands. + if (!BVN) { + LHS = Op.getOperand(0); + BVN = dyn_cast<BuildVectorSDNode>(Op.getOperand(1).getNode()); } + if (!BVN) + return Op; - // We've done our best to put the right operands in the right places, all we - // can do now is check whether a BFI exists. 
- Bitfield = RHS.getOperand(0); - int32_t LSB = getLSBForBFI(DAG, DL, VT, Bitfield, RHSMask); - if (LSB == -1) + APInt CnstBits(VT.getSizeInBits(), 0); + APInt UndefBits(VT.getSizeInBits(), 0); + if (resolveBuildVector(BVN, CnstBits, UndefBits)) { + // We make use of a little bit of goto ickiness in order to avoid having to + // duplicate the immediate matching logic for the undef toggled case. + bool SecondTry = false; + AttemptModImm: + + if (CnstBits.getHiBits(64) == CnstBits.getLoBits(64)) { + CnstBits = CnstBits.zextOrTrunc(64); + uint64_t CnstVal = CnstBits.getZExtValue(); + + if (AArch64_AM::isAdvSIMDModImmType1(CnstVal)) { + CnstVal = AArch64_AM::encodeAdvSIMDModImmType1(CnstVal); + MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32; + SDValue Mov = DAG.getNode(AArch64ISD::ORRi, dl, MovTy, LHS, + DAG.getConstant(CnstVal, MVT::i32), + DAG.getConstant(0, MVT::i32)); + return DAG.getNode(ISD::BITCAST, dl, VT, Mov); + } + + if (AArch64_AM::isAdvSIMDModImmType2(CnstVal)) { + CnstVal = AArch64_AM::encodeAdvSIMDModImmType2(CnstVal); + MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32; + SDValue Mov = DAG.getNode(AArch64ISD::ORRi, dl, MovTy, LHS, + DAG.getConstant(CnstVal, MVT::i32), + DAG.getConstant(8, MVT::i32)); + return DAG.getNode(ISD::BITCAST, dl, VT, Mov); + } + + if (AArch64_AM::isAdvSIMDModImmType3(CnstVal)) { + CnstVal = AArch64_AM::encodeAdvSIMDModImmType3(CnstVal); + MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32; + SDValue Mov = DAG.getNode(AArch64ISD::ORRi, dl, MovTy, LHS, + DAG.getConstant(CnstVal, MVT::i32), + DAG.getConstant(16, MVT::i32)); + return DAG.getNode(ISD::BITCAST, dl, VT, Mov); + } + + if (AArch64_AM::isAdvSIMDModImmType4(CnstVal)) { + CnstVal = AArch64_AM::encodeAdvSIMDModImmType4(CnstVal); + MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32; + SDValue Mov = DAG.getNode(AArch64ISD::ORRi, dl, MovTy, LHS, + DAG.getConstant(CnstVal, MVT::i32), + DAG.getConstant(24, MVT::i32)); + return DAG.getNode(ISD::BITCAST, dl, VT, Mov); + } + + if (AArch64_AM::isAdvSIMDModImmType5(CnstVal)) { + CnstVal = AArch64_AM::encodeAdvSIMDModImmType5(CnstVal); + MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v8i16 : MVT::v4i16; + SDValue Mov = DAG.getNode(AArch64ISD::ORRi, dl, MovTy, LHS, + DAG.getConstant(CnstVal, MVT::i32), + DAG.getConstant(0, MVT::i32)); + return DAG.getNode(ISD::BITCAST, dl, VT, Mov); + } + + if (AArch64_AM::isAdvSIMDModImmType6(CnstVal)) { + CnstVal = AArch64_AM::encodeAdvSIMDModImmType6(CnstVal); + MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v8i16 : MVT::v4i16; + SDValue Mov = DAG.getNode(AArch64ISD::ORRi, dl, MovTy, LHS, + DAG.getConstant(CnstVal, MVT::i32), + DAG.getConstant(8, MVT::i32)); + return DAG.getNode(ISD::BITCAST, dl, VT, Mov); + } + } + + if (SecondTry) + goto FailedModImm; + SecondTry = true; + CnstBits = UndefBits; + goto AttemptModImm; + } + +// We can always fall back to a non-immediate OR. +FailedModImm: + return Op; +} + +SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op, + SelectionDAG &DAG) const { + BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode()); + SDLoc dl(Op); + EVT VT = Op.getValueType(); + + APInt CnstBits(VT.getSizeInBits(), 0); + APInt UndefBits(VT.getSizeInBits(), 0); + if (resolveBuildVector(BVN, CnstBits, UndefBits)) { + // We make use of a little bit of goto ickiness in order to avoid having to + // duplicate the immediate matching logic for the undef toggled case. 
+ bool SecondTry = false; + AttemptModImm: + + if (CnstBits.getHiBits(64) == CnstBits.getLoBits(64)) { + CnstBits = CnstBits.zextOrTrunc(64); + uint64_t CnstVal = CnstBits.getZExtValue(); + + // Certain magic vector constants (used to express things like NOT + // and NEG) are passed through unmodified. This allows codegen patterns + // for these operations to match. Special-purpose patterns will lower + // these immediates to MOVIs if it proves necessary. + if (VT.isInteger() && (CnstVal == 0 || CnstVal == ~0ULL)) + return Op; + + // The many faces of MOVI... + if (AArch64_AM::isAdvSIMDModImmType10(CnstVal)) { + CnstVal = AArch64_AM::encodeAdvSIMDModImmType10(CnstVal); + if (VT.getSizeInBits() == 128) { + SDValue Mov = DAG.getNode(AArch64ISD::MOVIedit, dl, MVT::v2i64, + DAG.getConstant(CnstVal, MVT::i32)); + return DAG.getNode(ISD::BITCAST, dl, VT, Mov); + } + + // Support the V64 version via subregister insertion. + SDValue Mov = DAG.getNode(AArch64ISD::MOVIedit, dl, MVT::f64, + DAG.getConstant(CnstVal, MVT::i32)); + return DAG.getNode(ISD::BITCAST, dl, VT, Mov); + } + + if (AArch64_AM::isAdvSIMDModImmType1(CnstVal)) { + CnstVal = AArch64_AM::encodeAdvSIMDModImmType1(CnstVal); + MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32; + SDValue Mov = DAG.getNode(AArch64ISD::MOVIshift, dl, MovTy, + DAG.getConstant(CnstVal, MVT::i32), + DAG.getConstant(0, MVT::i32)); + return DAG.getNode(ISD::BITCAST, dl, VT, Mov); + } + + if (AArch64_AM::isAdvSIMDModImmType2(CnstVal)) { + CnstVal = AArch64_AM::encodeAdvSIMDModImmType2(CnstVal); + MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32; + SDValue Mov = DAG.getNode(AArch64ISD::MOVIshift, dl, MovTy, + DAG.getConstant(CnstVal, MVT::i32), + DAG.getConstant(8, MVT::i32)); + return DAG.getNode(ISD::BITCAST, dl, VT, Mov); + } + + if (AArch64_AM::isAdvSIMDModImmType3(CnstVal)) { + CnstVal = AArch64_AM::encodeAdvSIMDModImmType3(CnstVal); + MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32; + SDValue Mov = DAG.getNode(AArch64ISD::MOVIshift, dl, MovTy, + DAG.getConstant(CnstVal, MVT::i32), + DAG.getConstant(16, MVT::i32)); + return DAG.getNode(ISD::BITCAST, dl, VT, Mov); + } + + if (AArch64_AM::isAdvSIMDModImmType4(CnstVal)) { + CnstVal = AArch64_AM::encodeAdvSIMDModImmType4(CnstVal); + MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32; + SDValue Mov = DAG.getNode(AArch64ISD::MOVIshift, dl, MovTy, + DAG.getConstant(CnstVal, MVT::i32), + DAG.getConstant(24, MVT::i32)); + return DAG.getNode(ISD::BITCAST, dl, VT, Mov); + } + + if (AArch64_AM::isAdvSIMDModImmType5(CnstVal)) { + CnstVal = AArch64_AM::encodeAdvSIMDModImmType5(CnstVal); + MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v8i16 : MVT::v4i16; + SDValue Mov = DAG.getNode(AArch64ISD::MOVIshift, dl, MovTy, + DAG.getConstant(CnstVal, MVT::i32), + DAG.getConstant(0, MVT::i32)); + return DAG.getNode(ISD::BITCAST, dl, VT, Mov); + } + + if (AArch64_AM::isAdvSIMDModImmType6(CnstVal)) { + CnstVal = AArch64_AM::encodeAdvSIMDModImmType6(CnstVal); + MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v8i16 : MVT::v4i16; + SDValue Mov = DAG.getNode(AArch64ISD::MOVIshift, dl, MovTy, + DAG.getConstant(CnstVal, MVT::i32), + DAG.getConstant(8, MVT::i32)); + return DAG.getNode(ISD::BITCAST, dl, VT, Mov); + } + + if (AArch64_AM::isAdvSIMDModImmType7(CnstVal)) { + CnstVal = AArch64_AM::encodeAdvSIMDModImmType7(CnstVal); + MVT MovTy = (VT.getSizeInBits() == 128) ? 
MVT::v4i32 : MVT::v2i32; + SDValue Mov = DAG.getNode(AArch64ISD::MOVImsl, dl, MovTy, + DAG.getConstant(CnstVal, MVT::i32), + DAG.getConstant(264, MVT::i32)); + return DAG.getNode(ISD::BITCAST, dl, VT, Mov); + } + + if (AArch64_AM::isAdvSIMDModImmType8(CnstVal)) { + CnstVal = AArch64_AM::encodeAdvSIMDModImmType8(CnstVal); + MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32; + SDValue Mov = DAG.getNode(AArch64ISD::MOVImsl, dl, MovTy, + DAG.getConstant(CnstVal, MVT::i32), + DAG.getConstant(272, MVT::i32)); + return DAG.getNode(ISD::BITCAST, dl, VT, Mov); + } + + if (AArch64_AM::isAdvSIMDModImmType9(CnstVal)) { + CnstVal = AArch64_AM::encodeAdvSIMDModImmType9(CnstVal); + MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v16i8 : MVT::v8i8; + SDValue Mov = DAG.getNode(AArch64ISD::MOVI, dl, MovTy, + DAG.getConstant(CnstVal, MVT::i32)); + return DAG.getNode(ISD::BITCAST, dl, VT, Mov); + } + + // The few faces of FMOV... + if (AArch64_AM::isAdvSIMDModImmType11(CnstVal)) { + CnstVal = AArch64_AM::encodeAdvSIMDModImmType11(CnstVal); + MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4f32 : MVT::v2f32; + SDValue Mov = DAG.getNode(AArch64ISD::FMOV, dl, MovTy, + DAG.getConstant(CnstVal, MVT::i32)); + return DAG.getNode(ISD::BITCAST, dl, VT, Mov); + } + + if (AArch64_AM::isAdvSIMDModImmType12(CnstVal) && + VT.getSizeInBits() == 128) { + CnstVal = AArch64_AM::encodeAdvSIMDModImmType12(CnstVal); + SDValue Mov = DAG.getNode(AArch64ISD::FMOV, dl, MVT::v2f64, + DAG.getConstant(CnstVal, MVT::i32)); + return DAG.getNode(ISD::BITCAST, dl, VT, Mov); + } + + // The many faces of MVNI... + CnstVal = ~CnstVal; + if (AArch64_AM::isAdvSIMDModImmType1(CnstVal)) { + CnstVal = AArch64_AM::encodeAdvSIMDModImmType1(CnstVal); + MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32; + SDValue Mov = DAG.getNode(AArch64ISD::MVNIshift, dl, MovTy, + DAG.getConstant(CnstVal, MVT::i32), + DAG.getConstant(0, MVT::i32)); + return DAG.getNode(ISD::BITCAST, dl, VT, Mov); + } + + if (AArch64_AM::isAdvSIMDModImmType2(CnstVal)) { + CnstVal = AArch64_AM::encodeAdvSIMDModImmType2(CnstVal); + MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32; + SDValue Mov = DAG.getNode(AArch64ISD::MVNIshift, dl, MovTy, + DAG.getConstant(CnstVal, MVT::i32), + DAG.getConstant(8, MVT::i32)); + return DAG.getNode(ISD::BITCAST, dl, VT, Mov); + } + + if (AArch64_AM::isAdvSIMDModImmType3(CnstVal)) { + CnstVal = AArch64_AM::encodeAdvSIMDModImmType3(CnstVal); + MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32; + SDValue Mov = DAG.getNode(AArch64ISD::MVNIshift, dl, MovTy, + DAG.getConstant(CnstVal, MVT::i32), + DAG.getConstant(16, MVT::i32)); + return DAG.getNode(ISD::BITCAST, dl, VT, Mov); + } + + if (AArch64_AM::isAdvSIMDModImmType4(CnstVal)) { + CnstVal = AArch64_AM::encodeAdvSIMDModImmType4(CnstVal); + MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32; + SDValue Mov = DAG.getNode(AArch64ISD::MVNIshift, dl, MovTy, + DAG.getConstant(CnstVal, MVT::i32), + DAG.getConstant(24, MVT::i32)); + return DAG.getNode(ISD::BITCAST, dl, VT, Mov); + } + + if (AArch64_AM::isAdvSIMDModImmType5(CnstVal)) { + CnstVal = AArch64_AM::encodeAdvSIMDModImmType5(CnstVal); + MVT MovTy = (VT.getSizeInBits() == 128) ? 
MVT::v8i16 : MVT::v4i16; + SDValue Mov = DAG.getNode(AArch64ISD::MVNIshift, dl, MovTy, + DAG.getConstant(CnstVal, MVT::i32), + DAG.getConstant(0, MVT::i32)); + return DAG.getNode(ISD::BITCAST, dl, VT, Mov); + } + + if (AArch64_AM::isAdvSIMDModImmType6(CnstVal)) { + CnstVal = AArch64_AM::encodeAdvSIMDModImmType6(CnstVal); + MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v8i16 : MVT::v4i16; + SDValue Mov = DAG.getNode(AArch64ISD::MVNIshift, dl, MovTy, + DAG.getConstant(CnstVal, MVT::i32), + DAG.getConstant(8, MVT::i32)); + return DAG.getNode(ISD::BITCAST, dl, VT, Mov); + } + + if (AArch64_AM::isAdvSIMDModImmType7(CnstVal)) { + CnstVal = AArch64_AM::encodeAdvSIMDModImmType7(CnstVal); + MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32; + SDValue Mov = DAG.getNode(AArch64ISD::MVNImsl, dl, MovTy, + DAG.getConstant(CnstVal, MVT::i32), + DAG.getConstant(264, MVT::i32)); + return DAG.getNode(ISD::BITCAST, dl, VT, Mov); + } + + if (AArch64_AM::isAdvSIMDModImmType8(CnstVal)) { + CnstVal = AArch64_AM::encodeAdvSIMDModImmType8(CnstVal); + MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32; + SDValue Mov = DAG.getNode(AArch64ISD::MVNImsl, dl, MovTy, + DAG.getConstant(CnstVal, MVT::i32), + DAG.getConstant(272, MVT::i32)); + return DAG.getNode(ISD::BITCAST, dl, VT, Mov); + } + } + + if (SecondTry) + goto FailedModImm; + SecondTry = true; + CnstBits = UndefBits; + goto AttemptModImm; + } +FailedModImm: + + // Scan through the operands to find some interesting properties we can + // exploit: + // 1) If only one value is used, we can use a DUP, or + // 2) if only the low element is not undef, we can just insert that, or + // 3) if only one constant value is used (w/ some non-constant lanes), + // we can splat the constant value into the whole vector then fill + // in the non-constant lanes. + // 4) FIXME: If different constant values are used, but we can intelligently + // select the values we'll be overwriting for the non-constant + // lanes such that we can directly materialize the vector + // some other way (MOVI, e.g.), we can be sneaky. + unsigned NumElts = VT.getVectorNumElements(); + bool isOnlyLowElement = true; + bool usesOnlyOneValue = true; + bool usesOnlyOneConstantValue = true; + bool isConstant = true; + unsigned NumConstantLanes = 0; + SDValue Value; + SDValue ConstantValue; + for (unsigned i = 0; i < NumElts; ++i) { + SDValue V = Op.getOperand(i); + if (V.getOpcode() == ISD::UNDEF) + continue; + if (i > 0) + isOnlyLowElement = false; + if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V)) + isConstant = false; + + if (isa<ConstantSDNode>(V) || isa<ConstantFPSDNode>(V)) { + ++NumConstantLanes; + if (!ConstantValue.getNode()) + ConstantValue = V; + else if (ConstantValue != V) + usesOnlyOneConstantValue = false; + } + + if (!Value.getNode()) + Value = V; + else if (V != Value) + usesOnlyOneValue = false; + } + + if (!Value.getNode()) + return DAG.getUNDEF(VT); + + if (isOnlyLowElement) + return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value); + + // Use DUP for non-constant splats. For f32 constant splats, reduce to + // i32 and try again. + if (usesOnlyOneValue) { + if (!isConstant) { + if (Value.getOpcode() != ISD::EXTRACT_VECTOR_ELT || + Value.getValueType() != VT) + return DAG.getNode(AArch64ISD::DUP, dl, VT, Value); + + // This is actually a DUPLANExx operation, which keeps everything vectory. + + // DUPLANE works on 128-bit vectors, widen it if necessary. 
+ SDValue Lane = Value.getOperand(1); + Value = Value.getOperand(0); + if (Value.getValueType().getSizeInBits() == 64) + Value = WidenVector(Value, DAG); + + unsigned Opcode = getDUPLANEOp(VT.getVectorElementType()); + return DAG.getNode(Opcode, dl, VT, Value, Lane); + } + + if (VT.getVectorElementType().isFloatingPoint()) { + SmallVector<SDValue, 8> Ops; + MVT NewType = + (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64; + for (unsigned i = 0; i < NumElts; ++i) + Ops.push_back(DAG.getNode(ISD::BITCAST, dl, NewType, Op.getOperand(i))); + EVT VecVT = EVT::getVectorVT(*DAG.getContext(), NewType, NumElts); + SDValue Val = DAG.getNode(ISD::BUILD_VECTOR, dl, VecVT, Ops); + Val = LowerBUILD_VECTOR(Val, DAG); + if (Val.getNode()) + return DAG.getNode(ISD::BITCAST, dl, VT, Val); + } + } + + // If there was only one constant value used and for more than one lane, + // start by splatting that value, then replace the non-constant lanes. This + // is better than the default, which will perform a separate initialization + // for each lane. + if (NumConstantLanes > 0 && usesOnlyOneConstantValue) { + SDValue Val = DAG.getNode(AArch64ISD::DUP, dl, VT, ConstantValue); + // Now insert the non-constant lanes. + for (unsigned i = 0; i < NumElts; ++i) { + SDValue V = Op.getOperand(i); + SDValue LaneIdx = DAG.getConstant(i, MVT::i64); + if (!isa<ConstantSDNode>(V) && !isa<ConstantFPSDNode>(V)) { + // Note that type legalization likely mucked about with the VT of the + // source operand, so we may have to convert it here before inserting. + Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Val, V, LaneIdx); + } + } + return Val; + } + + // If all elements are constants and the case above didn't get hit, fall back + // to the default expansion, which will generate a load from the constant + // pool. + if (isConstant) + return SDValue(); + + // Empirical tests suggest this is rarely worth it for vectors of length <= 2. + if (NumElts >= 4) { + SDValue shuffle = ReconstructShuffle(Op, DAG); + if (shuffle != SDValue()) + return shuffle; + } + + // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we + // know the default expansion would otherwise fall back on something even + // worse. For a vector with one or two non-undef values, that's + // scalar_to_vector for the elements followed by a shuffle (provided the + // shuffle is valid for the target) and materialization element by element + // on the stack followed by a load for everything else. + if (!isConstant && !usesOnlyOneValue) { + SDValue Vec = DAG.getUNDEF(VT); + SDValue Op0 = Op.getOperand(0); + unsigned ElemSize = VT.getVectorElementType().getSizeInBits(); + unsigned i = 0; + // For 32 and 64 bit types, use INSERT_SUBREG for lane zero to + // a) Avoid a RMW dependency on the full vector register, and + // b) Allow the register coalescer to fold away the copy if the + // value is already in an S or D register. + if (Op0.getOpcode() != ISD::UNDEF && (ElemSize == 32 || ElemSize == 64)) { + unsigned SubIdx = ElemSize == 32 ? AArch64::ssub : AArch64::dsub; + MachineSDNode *N = + DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl, VT, Vec, Op0, + DAG.getTargetConstant(SubIdx, MVT::i32)); + Vec = SDValue(N, 0); + ++i; + } + for (; i < NumElts; ++i) { + SDValue V = Op.getOperand(i); + if (V.getOpcode() == ISD::UNDEF) + continue; + SDValue LaneIdx = DAG.getConstant(i, MVT::i64); + Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Vec, V, LaneIdx); + } + return Vec; + } + + // Just use the default expansion. 
We failed to find a better alternative. + return SDValue(); +} + +SDValue AArch64TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, + SelectionDAG &DAG) const { + assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT && "Unknown opcode!"); + + // Check for non-constant lane. + if (!isa<ConstantSDNode>(Op.getOperand(2))) return SDValue(); - uint32_t Width = CountPopulation_64(RHSMask); - assert(Width && "Expected non-zero bitfield width"); + EVT VT = Op.getOperand(0).getValueType(); - SDValue BFI = DAG.getNode(AArch64ISD::BFI, DL, VT, - LHS.getOperand(0), Bitfield, - DAG.getConstant(LSB, MVT::i64), - DAG.getConstant(Width, MVT::i64)); + // Insertion/extraction are legal for V128 types. + if (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 || + VT == MVT::v2i64 || VT == MVT::v4f32 || VT == MVT::v2f64) + return Op; + + if (VT != MVT::v8i8 && VT != MVT::v4i16 && VT != MVT::v2i32 && + VT != MVT::v1i64 && VT != MVT::v2f32) + return SDValue(); - // Mask is trivial - if ((LHSMask | RHSMask) == (-1ULL >> (64 - VT.getSizeInBits()))) - return BFI; + // For V64 types, we perform insertion by expanding the value + // to a V128 type and perform the insertion on that. + SDLoc DL(Op); + SDValue WideVec = WidenVector(Op.getOperand(0), DAG); + EVT WideTy = WideVec.getValueType(); - return DAG.getNode(ISD::AND, DL, VT, BFI, - DAG.getConstant(LHSMask | RHSMask, VT)); + SDValue Node = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, WideTy, WideVec, + Op.getOperand(1), Op.getOperand(2)); + // Re-narrow the resultant vector. + return NarrowVector(Node, DAG); } -/// Search for the bitwise combining (with careful masks) of a MaskedBFI and its -/// original input. This is surprisingly common because SROA splits things up -/// into i8 chunks, so the originally detected MaskedBFI may actually only act -/// on the low (say) byte of a word. This is then orred into the rest of the -/// word afterwards. -/// -/// Basic input: (or (and OLDFIELD, MASK1), (MaskedBFI MASK2, OLDFIELD, ...)). -/// -/// If MASK1 and MASK2 are compatible, we can fold the whole thing into the -/// MaskedBFI. We can also deal with a certain amount of extend/truncate being -/// involved. -static SDValue tryCombineToLargerBFI(SDNode *N, - TargetLowering::DAGCombinerInfo &DCI, - const AArch64Subtarget *Subtarget) { - SelectionDAG &DAG = DCI.DAG; - SDLoc DL(N); - EVT VT = N->getValueType(0); +SDValue +AArch64TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op, + SelectionDAG &DAG) const { + assert(Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT && "Unknown opcode!"); - // First job is to hunt for a MaskedBFI on either the left or right. Swap - // operands if it's actually on the right. - SDValue BFI; - SDValue PossExtraMask; - uint64_t ExistingMask = 0; - bool Extended = false; - if (findMaskedBFI(N->getOperand(0), BFI, ExistingMask, Extended)) - PossExtraMask = N->getOperand(1); - else if (findMaskedBFI(N->getOperand(1), BFI, ExistingMask, Extended)) - PossExtraMask = N->getOperand(0); - else + // Check for non-constant lane. + if (!isa<ConstantSDNode>(Op.getOperand(1))) + return SDValue(); + + EVT VT = Op.getOperand(0).getValueType(); + + // Insertion/extraction are legal for V128 types. 
+ if (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 || + VT == MVT::v2i64 || VT == MVT::v4f32 || VT == MVT::v2f64) + return Op; + + if (VT != MVT::v8i8 && VT != MVT::v4i16 && VT != MVT::v2i32 && + VT != MVT::v1i64 && VT != MVT::v2f32) + return SDValue(); + + // For V64 types, we perform extraction by expanding the value + // to a V128 type and perform the extraction on that. + SDLoc DL(Op); + SDValue WideVec = WidenVector(Op.getOperand(0), DAG); + EVT WideTy = WideVec.getValueType(); + + EVT ExtrTy = WideTy.getVectorElementType(); + if (ExtrTy == MVT::i16 || ExtrTy == MVT::i8) + ExtrTy = MVT::i32; + + // For extractions, we just return the result directly. + return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ExtrTy, WideVec, + Op.getOperand(1)); +} + +SDValue AArch64TargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op, + SelectionDAG &DAG) const { + EVT VT = Op.getOperand(0).getValueType(); + SDLoc dl(Op); + // Just in case... + if (!VT.isVector()) return SDValue(); - // We can only combine a BFI with another compatible mask. - if (PossExtraMask.getOpcode() != ISD::AND || - !isa<ConstantSDNode>(PossExtraMask.getOperand(1))) + ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Op.getOperand(1)); + if (!Cst) return SDValue(); + unsigned Val = Cst->getZExtValue(); + + unsigned Size = Op.getValueType().getSizeInBits(); + if (Val == 0) { + switch (Size) { + case 8: + return DAG.getTargetExtractSubreg(AArch64::bsub, dl, Op.getValueType(), + Op.getOperand(0)); + case 16: + return DAG.getTargetExtractSubreg(AArch64::hsub, dl, Op.getValueType(), + Op.getOperand(0)); + case 32: + return DAG.getTargetExtractSubreg(AArch64::ssub, dl, Op.getValueType(), + Op.getOperand(0)); + case 64: + return DAG.getTargetExtractSubreg(AArch64::dsub, dl, Op.getValueType(), + Op.getOperand(0)); + default: + llvm_unreachable("Unexpected vector type in extract_subvector!"); + } + } + // If this is extracting the upper 64-bits of a 128-bit vector, we match + // that directly. + if (Size == 64 && Val * VT.getVectorElementType().getSizeInBits() == 64) + return Op; + + return SDValue(); +} + +bool AArch64TargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M, + EVT VT) const { + if (VT.getVectorNumElements() == 4 && + (VT.is128BitVector() || VT.is64BitVector())) { + unsigned PFIndexes[4]; + for (unsigned i = 0; i != 4; ++i) { + if (M[i] < 0) + PFIndexes[i] = 8; + else + PFIndexes[i] = M[i]; + } + + // Compute the index in the perfect shuffle table. + unsigned PFTableIndex = PFIndexes[0] * 9 * 9 * 9 + PFIndexes[1] * 9 * 9 + + PFIndexes[2] * 9 + PFIndexes[3]; + unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; + unsigned Cost = (PFEntry >> 30); + + if (Cost <= 4) + return true; + } + + bool DummyBool; + int DummyInt; + unsigned DummyUnsigned; + + return (ShuffleVectorSDNode::isSplatMask(&M[0], VT) || isREVMask(M, VT, 64) || + isREVMask(M, VT, 32) || isREVMask(M, VT, 16) || + isEXTMask(M, VT, DummyBool, DummyUnsigned) || + // isTBLMask(M, VT) || // FIXME: Port TBL support from ARM. 
+ isTRNMask(M, VT, DummyUnsigned) || isUZPMask(M, VT, DummyUnsigned) || + isZIPMask(M, VT, DummyUnsigned) || + isTRN_v_undef_Mask(M, VT, DummyUnsigned) || + isUZP_v_undef_Mask(M, VT, DummyUnsigned) || + isZIP_v_undef_Mask(M, VT, DummyUnsigned) || + isINSMask(M, VT.getVectorNumElements(), DummyBool, DummyInt) || + isConcatMask(M, VT, VT.getSizeInBits() == 128)); +} + +/// getVShiftImm - Check if this is a valid build_vector for the immediate +/// operand of a vector shift operation, where all the elements of the +/// build_vector must have the same constant integer value. +static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) { + // Ignore bit_converts. + while (Op.getOpcode() == ISD::BITCAST) + Op = Op.getOperand(0); + BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); + APInt SplatBits, SplatUndef; + unsigned SplatBitSize; + bool HasAnyUndefs; + if (!BVN || !BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, + HasAnyUndefs, ElementBits) || + SplatBitSize > ElementBits) + return false; + Cnt = SplatBits.getSExtValue(); + return true; +} + +/// isVShiftLImm - Check if this is a valid build_vector for the immediate +/// operand of a vector shift left operation. That value must be in the range: +/// 0 <= Value < ElementBits for a left shift; or +/// 0 <= Value <= ElementBits for a long left shift. +static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) { + assert(VT.isVector() && "vector shift count is not a vector type"); + unsigned ElementBits = VT.getVectorElementType().getSizeInBits(); + if (!getVShiftImm(Op, ElementBits, Cnt)) + return false; + return (Cnt >= 0 && (isLong ? Cnt - 1 : Cnt) < ElementBits); +} + +/// isVShiftRImm - Check if this is a valid build_vector for the immediate +/// operand of a vector shift right operation. For a shift opcode, the value +/// is positive, but for an intrinsic the value count must be negative. The +/// absolute value must be in the range: +/// 1 <= |Value| <= ElementBits for a right shift; or +/// 1 <= |Value| <= ElementBits/2 for a narrow right shift. +static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic, + int64_t &Cnt) { + assert(VT.isVector() && "vector shift count is not a vector type"); + unsigned ElementBits = VT.getVectorElementType().getSizeInBits(); + if (!getVShiftImm(Op, ElementBits, Cnt)) + return false; + if (isIntrinsic) + Cnt = -Cnt; + return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits / 2 : ElementBits)); +} + +SDValue AArch64TargetLowering::LowerVectorSRA_SRL_SHL(SDValue Op, + SelectionDAG &DAG) const { + EVT VT = Op.getValueType(); + SDLoc DL(Op); + int64_t Cnt; + + if (!Op.getOperand(1).getValueType().isVector()) + return Op; + unsigned EltSize = VT.getVectorElementType().getSizeInBits(); + + switch (Op.getOpcode()) { + default: + llvm_unreachable("unexpected shift opcode"); + + case ISD::SHL: + if (isVShiftLImm(Op.getOperand(1), VT, false, Cnt) && Cnt < EltSize) + return DAG.getNode(AArch64ISD::VSHL, SDLoc(Op), VT, Op.getOperand(0), + DAG.getConstant(Cnt, MVT::i32)); + return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT, + DAG.getConstant(Intrinsic::aarch64_neon_ushl, MVT::i32), + Op.getOperand(0), Op.getOperand(1)); + case ISD::SRA: + case ISD::SRL: + // Right shift immediate + if (isVShiftRImm(Op.getOperand(1), VT, false, false, Cnt) && + Cnt < EltSize) { + unsigned Opc = + (Op.getOpcode() == ISD::SRA) ? 
AArch64ISD::VASHR : AArch64ISD::VLSHR;
+ return DAG.getNode(Opc, SDLoc(Op), VT, Op.getOperand(0),
+ DAG.getConstant(Cnt, MVT::i32));
+ }
+
+ // Right shift register. Note that there is no shift-right-register
+ // instruction, but the shift left register instruction takes a signed
+ // value, where negative numbers specify a right shift.
+ unsigned Opc = (Op.getOpcode() == ISD::SRA) ? Intrinsic::aarch64_neon_sshl
+ : Intrinsic::aarch64_neon_ushl;
+ // Negate the shift amount.
+ SDValue NegShift = DAG.getNode(AArch64ISD::NEG, DL, VT, Op.getOperand(1));
+ SDValue NegShiftLeft =
+ DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
+ DAG.getConstant(Opc, MVT::i32), Op.getOperand(0), NegShift);
+ return NegShiftLeft;
+ }
+
+ return SDValue();
+}
+
+static SDValue EmitVectorComparison(SDValue LHS, SDValue RHS,
+ AArch64CC::CondCode CC, bool NoNans, EVT VT,
+ SDLoc dl, SelectionDAG &DAG) {
+ EVT SrcVT = LHS.getValueType();
+
+ BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(RHS.getNode());
+ APInt CnstBits(VT.getSizeInBits(), 0);
+ APInt UndefBits(VT.getSizeInBits(), 0);
+ bool IsCnst = BVN && resolveBuildVector(BVN, CnstBits, UndefBits);
+ bool IsZero = IsCnst && (CnstBits == 0);
+
+ if (SrcVT.getVectorElementType().isFloatingPoint()) {
+ switch (CC) {
+ default:
+ return SDValue();
+ case AArch64CC::NE: {
+ SDValue Fcmeq;
+ if (IsZero)
+ Fcmeq = DAG.getNode(AArch64ISD::FCMEQz, dl, VT, LHS);
+ else
+ Fcmeq = DAG.getNode(AArch64ISD::FCMEQ, dl, VT, LHS, RHS);
+ return DAG.getNode(AArch64ISD::NOT, dl, VT, Fcmeq);
+ }
+ case AArch64CC::EQ:
+ if (IsZero)
+ return DAG.getNode(AArch64ISD::FCMEQz, dl, VT, LHS);
+ return DAG.getNode(AArch64ISD::FCMEQ, dl, VT, LHS, RHS);
+ case AArch64CC::GE:
+ if (IsZero)
+ return DAG.getNode(AArch64ISD::FCMGEz, dl, VT, LHS);
+ return DAG.getNode(AArch64ISD::FCMGE, dl, VT, LHS, RHS);
+ case AArch64CC::GT:
+ if (IsZero)
+ return DAG.getNode(AArch64ISD::FCMGTz, dl, VT, LHS);
+ return DAG.getNode(AArch64ISD::FCMGT, dl, VT, LHS, RHS);
+ case AArch64CC::LS:
+ if (IsZero)
+ return DAG.getNode(AArch64ISD::FCMLEz, dl, VT, LHS);
+ return DAG.getNode(AArch64ISD::FCMGE, dl, VT, RHS, LHS);
+ case AArch64CC::LT:
+ if (!NoNans)
+ return SDValue();
+ // If we ignore NaNs then we can use the MI implementation.
+ // Fallthrough.
+ case AArch64CC::MI: + if (IsZero) + return DAG.getNode(AArch64ISD::FCMLTz, dl, VT, LHS); + return DAG.getNode(AArch64ISD::FCMGT, dl, VT, RHS, LHS); + } + } + + switch (CC) { + default: + return SDValue(); + case AArch64CC::NE: { + SDValue Cmeq; + if (IsZero) + Cmeq = DAG.getNode(AArch64ISD::CMEQz, dl, VT, LHS); + else + Cmeq = DAG.getNode(AArch64ISD::CMEQ, dl, VT, LHS, RHS); + return DAG.getNode(AArch64ISD::NOT, dl, VT, Cmeq); + } + case AArch64CC::EQ: + if (IsZero) + return DAG.getNode(AArch64ISD::CMEQz, dl, VT, LHS); + return DAG.getNode(AArch64ISD::CMEQ, dl, VT, LHS, RHS); + case AArch64CC::GE: + if (IsZero) + return DAG.getNode(AArch64ISD::CMGEz, dl, VT, LHS); + return DAG.getNode(AArch64ISD::CMGE, dl, VT, LHS, RHS); + case AArch64CC::GT: + if (IsZero) + return DAG.getNode(AArch64ISD::CMGTz, dl, VT, LHS); + return DAG.getNode(AArch64ISD::CMGT, dl, VT, LHS, RHS); + case AArch64CC::LE: + if (IsZero) + return DAG.getNode(AArch64ISD::CMLEz, dl, VT, LHS); + return DAG.getNode(AArch64ISD::CMGE, dl, VT, RHS, LHS); + case AArch64CC::LS: + return DAG.getNode(AArch64ISD::CMHS, dl, VT, RHS, LHS); + case AArch64CC::LO: + return DAG.getNode(AArch64ISD::CMHI, dl, VT, RHS, LHS); + case AArch64CC::LT: + if (IsZero) + return DAG.getNode(AArch64ISD::CMLTz, dl, VT, LHS); + return DAG.getNode(AArch64ISD::CMGT, dl, VT, RHS, LHS); + case AArch64CC::HI: + return DAG.getNode(AArch64ISD::CMHI, dl, VT, LHS, RHS); + case AArch64CC::HS: + return DAG.getNode(AArch64ISD::CMHS, dl, VT, LHS, RHS); + } +} - uint64_t ExtraMask = PossExtraMask->getConstantOperandVal(1); +SDValue AArch64TargetLowering::LowerVSETCC(SDValue Op, + SelectionDAG &DAG) const { + ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); + SDValue LHS = Op.getOperand(0); + SDValue RHS = Op.getOperand(1); + SDLoc dl(Op); + + if (LHS.getValueType().getVectorElementType().isInteger()) { + assert(LHS.getValueType() == RHS.getValueType()); + AArch64CC::CondCode AArch64CC = changeIntCCToAArch64CC(CC); + return EmitVectorComparison(LHS, RHS, AArch64CC, false, Op.getValueType(), + dl, DAG); + } + + assert(LHS.getValueType().getVectorElementType() == MVT::f32 || + LHS.getValueType().getVectorElementType() == MVT::f64); + + // Unfortunately, the mapping of LLVM FP CC's onto AArch64 CC's isn't totally + // clean. Some of them require two branches to implement. + AArch64CC::CondCode CC1, CC2; + bool ShouldInvert; + changeVectorFPCCToAArch64CC(CC, CC1, CC2, ShouldInvert); - // Masks must be compatible. - if (ExtraMask & ExistingMask) + bool NoNaNs = getTargetMachine().Options.NoNaNsFPMath; + SDValue Cmp = + EmitVectorComparison(LHS, RHS, CC1, NoNaNs, Op.getValueType(), dl, DAG); + if (!Cmp.getNode()) return SDValue(); - SDValue OldBFIVal = BFI.getOperand(0); - SDValue NewBFIVal = BFI.getOperand(1); - if (Extended) { - // We skipped a ZERO_EXTEND above, so the input to the MaskedBFIs should be - // 32-bit and we'll be forming a 64-bit MaskedBFI. The MaskedBFI arguments - // need to be made compatible. 
- assert(VT == MVT::i64 && BFI.getValueType() == MVT::i32 - && "Invalid types for BFI"); - OldBFIVal = DAG.getNode(ISD::ANY_EXTEND, DL, VT, OldBFIVal); - NewBFIVal = DAG.getNode(ISD::ANY_EXTEND, DL, VT, NewBFIVal); + if (CC2 != AArch64CC::AL) { + SDValue Cmp2 = + EmitVectorComparison(LHS, RHS, CC2, NoNaNs, Op.getValueType(), dl, DAG); + if (!Cmp2.getNode()) + return SDValue(); + + Cmp = DAG.getNode(ISD::OR, dl, Cmp.getValueType(), Cmp, Cmp2); + } + + if (ShouldInvert) + return Cmp = DAG.getNOT(dl, Cmp, Cmp.getValueType()); + + return Cmp; +} + +/// getTgtMemIntrinsic - Represent NEON load and store intrinsics as +/// MemIntrinsicNodes. The associated MachineMemOperands record the alignment +/// specified in the intrinsic calls. +bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, + const CallInst &I, + unsigned Intrinsic) const { + switch (Intrinsic) { + case Intrinsic::aarch64_neon_ld2: + case Intrinsic::aarch64_neon_ld3: + case Intrinsic::aarch64_neon_ld4: + case Intrinsic::aarch64_neon_ld1x2: + case Intrinsic::aarch64_neon_ld1x3: + case Intrinsic::aarch64_neon_ld1x4: + case Intrinsic::aarch64_neon_ld2lane: + case Intrinsic::aarch64_neon_ld3lane: + case Intrinsic::aarch64_neon_ld4lane: + case Intrinsic::aarch64_neon_ld2r: + case Intrinsic::aarch64_neon_ld3r: + case Intrinsic::aarch64_neon_ld4r: { + Info.opc = ISD::INTRINSIC_W_CHAIN; + // Conservatively set memVT to the entire set of vectors loaded. + uint64_t NumElts = getDataLayout()->getTypeAllocSize(I.getType()) / 8; + Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); + Info.ptrVal = I.getArgOperand(I.getNumArgOperands() - 1); + Info.offset = 0; + Info.align = 0; + Info.vol = false; // volatile loads with NEON intrinsics not supported + Info.readMem = true; + Info.writeMem = false; + return true; + } + case Intrinsic::aarch64_neon_st2: + case Intrinsic::aarch64_neon_st3: + case Intrinsic::aarch64_neon_st4: + case Intrinsic::aarch64_neon_st1x2: + case Intrinsic::aarch64_neon_st1x3: + case Intrinsic::aarch64_neon_st1x4: + case Intrinsic::aarch64_neon_st2lane: + case Intrinsic::aarch64_neon_st3lane: + case Intrinsic::aarch64_neon_st4lane: { + Info.opc = ISD::INTRINSIC_VOID; + // Conservatively set memVT to the entire set of vectors stored. 
+ unsigned NumElts = 0; + for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) { + Type *ArgTy = I.getArgOperand(ArgI)->getType(); + if (!ArgTy->isVectorTy()) + break; + NumElts += getDataLayout()->getTypeAllocSize(ArgTy) / 8; + } + Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); + Info.ptrVal = I.getArgOperand(I.getNumArgOperands() - 1); + Info.offset = 0; + Info.align = 0; + Info.vol = false; // volatile stores with NEON intrinsics not supported + Info.readMem = false; + Info.writeMem = true; + return true; + } + case Intrinsic::aarch64_ldaxr: + case Intrinsic::aarch64_ldxr: { + PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType()); + Info.opc = ISD::INTRINSIC_W_CHAIN; + Info.memVT = MVT::getVT(PtrTy->getElementType()); + Info.ptrVal = I.getArgOperand(0); + Info.offset = 0; + Info.align = getDataLayout()->getABITypeAlignment(PtrTy->getElementType()); + Info.vol = true; + Info.readMem = true; + Info.writeMem = false; + return true; + } + case Intrinsic::aarch64_stlxr: + case Intrinsic::aarch64_stxr: { + PointerType *PtrTy = cast<PointerType>(I.getArgOperand(1)->getType()); + Info.opc = ISD::INTRINSIC_W_CHAIN; + Info.memVT = MVT::getVT(PtrTy->getElementType()); + Info.ptrVal = I.getArgOperand(1); + Info.offset = 0; + Info.align = getDataLayout()->getABITypeAlignment(PtrTy->getElementType()); + Info.vol = true; + Info.readMem = false; + Info.writeMem = true; + return true; + } + case Intrinsic::aarch64_ldaxp: + case Intrinsic::aarch64_ldxp: { + Info.opc = ISD::INTRINSIC_W_CHAIN; + Info.memVT = MVT::i128; + Info.ptrVal = I.getArgOperand(0); + Info.offset = 0; + Info.align = 16; + Info.vol = true; + Info.readMem = true; + Info.writeMem = false; + return true; + } + case Intrinsic::aarch64_stlxp: + case Intrinsic::aarch64_stxp: { + Info.opc = ISD::INTRINSIC_W_CHAIN; + Info.memVT = MVT::i128; + Info.ptrVal = I.getArgOperand(2); + Info.offset = 0; + Info.align = 16; + Info.vol = true; + Info.readMem = false; + Info.writeMem = true; + return true; + } + default: + break; } - // We need the MaskedBFI to be combined with a mask of the *same* value. - if (PossExtraMask.getOperand(0) != OldBFIVal) + return false; +} + +// Truncations from 64-bit GPR to 32-bit GPR is free. +bool AArch64TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const { + if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) + return false; + unsigned NumBits1 = Ty1->getPrimitiveSizeInBits(); + unsigned NumBits2 = Ty2->getPrimitiveSizeInBits(); + if (NumBits1 <= NumBits2) + return false; + return true; +} +bool AArch64TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const { + if (!VT1.isInteger() || !VT2.isInteger()) + return false; + unsigned NumBits1 = VT1.getSizeInBits(); + unsigned NumBits2 = VT2.getSizeInBits(); + if (NumBits1 <= NumBits2) + return false; + return true; +} + +// All 32-bit GPR operations implicitly zero the high-half of the corresponding +// 64-bit GPR. 
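+// For example, 'add w0, w1, w2' already clears bits [63:32] of x0, so
+// zero-extending that i32 result to i64 requires no additional instruction.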
+bool AArch64TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const { + if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) + return false; + unsigned NumBits1 = Ty1->getPrimitiveSizeInBits(); + unsigned NumBits2 = Ty2->getPrimitiveSizeInBits(); + if (NumBits1 == 32 && NumBits2 == 64) + return true; + return false; +} +bool AArch64TargetLowering::isZExtFree(EVT VT1, EVT VT2) const { + if (!VT1.isInteger() || !VT2.isInteger()) + return false; + unsigned NumBits1 = VT1.getSizeInBits(); + unsigned NumBits2 = VT2.getSizeInBits(); + if (NumBits1 == 32 && NumBits2 == 64) + return true; + return false; +} + +bool AArch64TargetLowering::isZExtFree(SDValue Val, EVT VT2) const { + EVT VT1 = Val.getValueType(); + if (isZExtFree(VT1, VT2)) { + return true; + } + + if (Val.getOpcode() != ISD::LOAD) + return false; + + // 8-, 16-, and 32-bit integer loads all implicitly zero-extend. + return (VT1.isSimple() && VT1.isInteger() && VT2.isSimple() && + VT2.isInteger() && VT1.getSizeInBits() <= 32); +} + +bool AArch64TargetLowering::hasPairedLoad(Type *LoadedType, + unsigned &RequiredAligment) const { + if (!LoadedType->isIntegerTy() && !LoadedType->isFloatTy()) + return false; + // Cyclone supports unaligned accesses. + RequiredAligment = 0; + unsigned NumBits = LoadedType->getPrimitiveSizeInBits(); + return NumBits == 32 || NumBits == 64; +} + +bool AArch64TargetLowering::hasPairedLoad(EVT LoadedType, + unsigned &RequiredAligment) const { + if (!LoadedType.isSimple() || + (!LoadedType.isInteger() && !LoadedType.isFloatingPoint())) + return false; + // Cyclone supports unaligned accesses. + RequiredAligment = 0; + unsigned NumBits = LoadedType.getSizeInBits(); + return NumBits == 32 || NumBits == 64; +} + +static bool memOpAlign(unsigned DstAlign, unsigned SrcAlign, + unsigned AlignCheck) { + return ((SrcAlign == 0 || SrcAlign % AlignCheck == 0) && + (DstAlign == 0 || DstAlign % AlignCheck == 0)); +} + +EVT AArch64TargetLowering::getOptimalMemOpType(uint64_t Size, unsigned DstAlign, + unsigned SrcAlign, bool IsMemset, + bool ZeroMemset, + bool MemcpyStrSrc, + MachineFunction &MF) const { + // Don't use AdvSIMD to implement 16-byte memset. It would have taken one + // instruction to materialize the v2i64 zero and one store (with restrictive + // addressing mode). Just do two i64 store of zero-registers. + bool Fast; + const Function *F = MF.getFunction(); + if (Subtarget->hasFPARMv8() && !IsMemset && Size >= 16 && + !F->getAttributes().hasAttribute(AttributeSet::FunctionIndex, + Attribute::NoImplicitFloat) && + (memOpAlign(SrcAlign, DstAlign, 16) || + (allowsUnalignedMemoryAccesses(MVT::f128, 0, &Fast) && Fast))) + return MVT::f128; + + return Size >= 8 ? MVT::i64 : MVT::i32; +} + +// 12-bit optionally shifted immediates are legal for adds. +bool AArch64TargetLowering::isLegalAddImmediate(int64_t Immed) const { + if ((Immed >> 12) == 0 || ((Immed & 0xfff) == 0 && Immed >> 24 == 0)) + return true; + return false; +} + +// Integer comparisons are implemented with ADDS/SUBS, so the range of valid +// immediates is the same as for an add or a sub. +bool AArch64TargetLowering::isLegalICmpImmediate(int64_t Immed) const { + if (Immed < 0) + Immed *= -1; + return isLegalAddImmediate(Immed); +} + +/// isLegalAddressingMode - Return true if the addressing mode represented +/// by AM is legal for this target, for a load/store of the specified type. 
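+/// For example, [x0], [x0, x1] and (for an 8-byte access) [x0, x1, lsl #3]
+/// all map onto these forms, whereas an address formed from a global symbol
+/// plus a register does not: a global is never allowed as the base.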
+bool AArch64TargetLowering::isLegalAddressingMode(const AddrMode &AM, + Type *Ty) const { + // AArch64 has five basic addressing modes: + // reg + // reg + 9-bit signed offset + // reg + SIZE_IN_BYTES * 12-bit unsigned offset + // reg1 + reg2 + // reg + SIZE_IN_BYTES * reg + + // No global is ever allowed as a base. + if (AM.BaseGV) + return false; + + // No reg+reg+imm addressing. + if (AM.HasBaseReg && AM.BaseOffs && AM.Scale) + return false; + + // check reg + imm case: + // i.e., reg + 0, reg + imm9, reg + SIZE_IN_BYTES * uimm12 + uint64_t NumBytes = 0; + if (Ty->isSized()) { + uint64_t NumBits = getDataLayout()->getTypeSizeInBits(Ty); + NumBytes = NumBits / 8; + if (!isPowerOf2_64(NumBits)) + NumBytes = 0; + } + + if (!AM.Scale) { + int64_t Offset = AM.BaseOffs; + + // 9-bit signed offset + if (Offset >= -(1LL << 9) && Offset <= (1LL << 9) - 1) + return true; + + // 12-bit unsigned offset + unsigned shift = Log2_64(NumBytes); + if (NumBytes && Offset > 0 && (Offset / NumBytes) <= (1LL << 12) - 1 && + // Must be a multiple of NumBytes (NumBytes is a power of 2) + (Offset >> shift) << shift == Offset) + return true; + return false; + } + + // Check reg1 + SIZE_IN_BYTES * reg2 and reg1 + reg2 + + if (!AM.Scale || AM.Scale == 1 || + (AM.Scale > 0 && (uint64_t)AM.Scale == NumBytes)) + return true; + return false; +} + +int AArch64TargetLowering::getScalingFactorCost(const AddrMode &AM, + Type *Ty) const { + // Scaling factors are not free at all. + // Operands | Rt Latency + // ------------------------------------------- + // Rt, [Xn, Xm] | 4 + // ------------------------------------------- + // Rt, [Xn, Xm, lsl #imm] | Rn: 4 Rm: 5 + // Rt, [Xn, Wm, <extend> #imm] | + if (isLegalAddressingMode(AM, Ty)) + // Scale represents reg2 * scale, thus account for 1 if + // it is not equal to 0 or 1. + return AM.Scale != 0 && AM.Scale != 1; + return -1; +} + +bool AArch64TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const { + VT = VT.getScalarType(); + + if (!VT.isSimple()) + return false; + + switch (VT.getSimpleVT().SimpleTy) { + case MVT::f32: + case MVT::f64: + return true; + default: + break; + } + + return false; +} + +const MCPhysReg * +AArch64TargetLowering::getScratchRegisters(CallingConv::ID) const { + // LR is a callee-save register, but we must treat it as clobbered by any call + // site. Hence we include LR in the scratch registers, which are in turn added + // as implicit-defs for stackmaps and patchpoints. + static const MCPhysReg ScratchRegs[] = { + AArch64::X16, AArch64::X17, AArch64::LR, 0 + }; + return ScratchRegs; +} + +bool +AArch64TargetLowering::isDesirableToCommuteWithShift(const SDNode *N) const { + EVT VT = N->getValueType(0); + // If N is unsigned bit extraction: ((x >> C) & mask), then do not combine + // it with shift to let it be lowered to UBFX. 
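+ // For example, ((x >> 3) & 0xff) can be selected as 'ubfx w0, w1, #3, #8',
+ // so commuting the shift here would only obscure that pattern.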
+ if (N->getOpcode() == ISD::AND && (VT == MVT::i32 || VT == MVT::i64) && + isa<ConstantSDNode>(N->getOperand(1))) { + uint64_t TruncMask = N->getConstantOperandVal(1); + if (isMask_64(TruncMask) && + N->getOperand(0).getOpcode() == ISD::SRL && + isa<ConstantSDNode>(N->getOperand(0)->getOperand(1))) + return false; + } + return true; +} + +bool AArch64TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm, + Type *Ty) const { + assert(Ty->isIntegerTy()); + + unsigned BitSize = Ty->getPrimitiveSizeInBits(); + if (BitSize == 0) + return false; + + int64_t Val = Imm.getSExtValue(); + if (Val == 0 || AArch64_AM::isLogicalImmediate(Val, BitSize)) + return true; + + if ((int64_t)Val < 0) + Val = ~Val; + if (BitSize == 32) + Val &= (1LL << 32) - 1; + + unsigned LZ = countLeadingZeros((uint64_t)Val); + unsigned Shift = (63 - LZ) / 16; + // MOVZ is free so return true for one or fewer MOVK. + return (Shift < 3) ? true : false; +} + +// Generate SUBS and CSEL for integer abs. +static SDValue performIntegerAbsCombine(SDNode *N, SelectionDAG &DAG) { + EVT VT = N->getValueType(0); + + SDValue N0 = N->getOperand(0); + SDValue N1 = N->getOperand(1); + SDLoc DL(N); + + // Check pattern of XOR(ADD(X,Y), Y) where Y is SRA(X, size(X)-1) + // and change it to SUB and CSEL. + if (VT.isInteger() && N->getOpcode() == ISD::XOR && + N0.getOpcode() == ISD::ADD && N0.getOperand(1) == N1 && + N1.getOpcode() == ISD::SRA && N1.getOperand(0) == N0.getOperand(0)) + if (ConstantSDNode *Y1C = dyn_cast<ConstantSDNode>(N1.getOperand(1))) + if (Y1C->getAPIntValue() == VT.getSizeInBits() - 1) { + SDValue Neg = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, VT), + N0.getOperand(0)); + // Generate SUBS & CSEL. + SDValue Cmp = + DAG.getNode(AArch64ISD::SUBS, DL, DAG.getVTList(VT, MVT::i32), + N0.getOperand(0), DAG.getConstant(0, VT)); + return DAG.getNode(AArch64ISD::CSEL, DL, VT, N0.getOperand(0), Neg, + DAG.getConstant(AArch64CC::PL, MVT::i32), + SDValue(Cmp.getNode(), 1)); + } + return SDValue(); +} + +// performXorCombine - Attempts to handle integer ABS. +static SDValue performXorCombine(SDNode *N, SelectionDAG &DAG, + TargetLowering::DAGCombinerInfo &DCI, + const AArch64Subtarget *Subtarget) { + if (DCI.isBeforeLegalizeOps()) return SDValue(); - BFI = DAG.getNode(AArch64ISD::BFI, DL, VT, - OldBFIVal, NewBFIVal, - BFI.getOperand(2), BFI.getOperand(3)); + return performIntegerAbsCombine(N, DAG); +} - // If the masking is trivial, we don't need to create it. - if ((ExtraMask | ExistingMask) == (-1ULL >> (64 - VT.getSizeInBits()))) - return BFI; +static SDValue performMulCombine(SDNode *N, SelectionDAG &DAG, + TargetLowering::DAGCombinerInfo &DCI, + const AArch64Subtarget *Subtarget) { + if (DCI.isBeforeLegalizeOps()) + return SDValue(); - return DAG.getNode(ISD::AND, DL, VT, BFI, - DAG.getConstant(ExtraMask | ExistingMask, VT)); + // Multiplication of a power of two plus/minus one can be done more + // cheaply as as shift+add/sub. For now, this is true unilaterally. If + // future CPUs have a cheaper MADD instruction, this may need to be + // gated on a subtarget feature. For Cyclone, 32-bit MADD is 4 cycles and + // 64-bit is 5 cycles, so this is always a win. + if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1))) { + APInt Value = C->getAPIntValue(); + EVT VT = N->getValueType(0); + APInt VP1 = Value + 1; + if (VP1.isPowerOf2()) { + // Multiplying by one less than a power of two, replace with a shift + // and a subtract. 
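+ // For example, x * 7 becomes (x << 3) - x.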
+ SDValue ShiftedVal = + DAG.getNode(ISD::SHL, SDLoc(N), VT, N->getOperand(0), + DAG.getConstant(VP1.logBase2(), MVT::i64)); + return DAG.getNode(ISD::SUB, SDLoc(N), VT, ShiftedVal, N->getOperand(0)); + } + APInt VM1 = Value - 1; + if (VM1.isPowerOf2()) { + // Multiplying by one more than a power of two, replace with a shift + // and an add. + SDValue ShiftedVal = + DAG.getNode(ISD::SHL, SDLoc(N), VT, N->getOperand(0), + DAG.getConstant(VM1.logBase2(), MVT::i64)); + return DAG.getNode(ISD::ADD, SDLoc(N), VT, ShiftedVal, N->getOperand(0)); + } + } + return SDValue(); +} + +static SDValue performIntToFpCombine(SDNode *N, SelectionDAG &DAG) { + EVT VT = N->getValueType(0); + if (VT != MVT::f32 && VT != MVT::f64) + return SDValue(); + // Only optimize when the source and destination types have the same width. + if (VT.getSizeInBits() != N->getOperand(0).getValueType().getSizeInBits()) + return SDValue(); + + // If the result of an integer load is only used by an integer-to-float + // conversion, use a fp load instead and a AdvSIMD scalar {S|U}CVTF instead. + // This eliminates an "integer-to-vector-move UOP and improve throughput. + SDValue N0 = N->getOperand(0); + if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() && + // Do not change the width of a volatile load. + !cast<LoadSDNode>(N0)->isVolatile()) { + LoadSDNode *LN0 = cast<LoadSDNode>(N0); + SDValue Load = DAG.getLoad(VT, SDLoc(N), LN0->getChain(), LN0->getBasePtr(), + LN0->getPointerInfo(), LN0->isVolatile(), + LN0->isNonTemporal(), LN0->isInvariant(), + LN0->getAlignment()); + + // Make sure successors of the original load stay after it by updating them + // to use the new Chain. + DAG.ReplaceAllUsesOfValueWith(SDValue(LN0, 1), Load.getValue(1)); + + unsigned Opcode = + (N->getOpcode() == ISD::SINT_TO_FP) ? AArch64ISD::SITOF : AArch64ISD::UITOF; + return DAG.getNode(Opcode, SDLoc(N), VT, Load); + } + + return SDValue(); } /// An EXTR instruction is made up of two shifts, ORed together. This helper @@ -3782,298 +6461,952 @@ static SDValue tryCombineToEXTR(SDNode *N, std::swap(ShiftLHS, ShiftRHS); } - return DAG.getNode(AArch64ISD::EXTR, DL, VT, - LHS, RHS, + return DAG.getNode(AArch64ISD::EXTR, DL, VT, LHS, RHS, DAG.getConstant(ShiftRHS, MVT::i64)); } -/// Target-specific dag combine xforms for ISD::OR -static SDValue PerformORCombine(SDNode *N, - TargetLowering::DAGCombinerInfo &DCI, - const AArch64Subtarget *Subtarget) { - +static SDValue tryCombineToBSL(SDNode *N, + TargetLowering::DAGCombinerInfo &DCI) { + EVT VT = N->getValueType(0); SelectionDAG &DAG = DCI.DAG; SDLoc DL(N); + + if (!VT.isVector()) + return SDValue(); + + SDValue N0 = N->getOperand(0); + if (N0.getOpcode() != ISD::AND) + return SDValue(); + + SDValue N1 = N->getOperand(1); + if (N1.getOpcode() != ISD::AND) + return SDValue(); + + // We only have to look for constant vectors here since the general, variable + // case can be handled in TableGen. + unsigned Bits = VT.getVectorElementType().getSizeInBits(); + uint64_t BitMask = Bits == 64 ? 
-1ULL : ((1ULL << Bits) - 1); + for (int i = 1; i >= 0; --i) + for (int j = 1; j >= 0; --j) { + BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(i)); + BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(j)); + if (!BVN0 || !BVN1) + continue; + + bool FoundMatch = true; + for (unsigned k = 0; k < VT.getVectorNumElements(); ++k) { + ConstantSDNode *CN0 = dyn_cast<ConstantSDNode>(BVN0->getOperand(k)); + ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(BVN1->getOperand(k)); + if (!CN0 || !CN1 || + CN0->getZExtValue() != (BitMask & ~CN1->getZExtValue())) { + FoundMatch = false; + break; + } + } + + if (FoundMatch) + return DAG.getNode(AArch64ISD::BSL, DL, VT, SDValue(BVN0, 0), + N0->getOperand(1 - i), N1->getOperand(1 - j)); + } + + return SDValue(); +} + +static SDValue performORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, + const AArch64Subtarget *Subtarget) { + // Attempt to form an EXTR from (or (shl VAL1, #N), (srl VAL2, #RegWidth-N)) + if (!EnableAArch64ExtrGeneration) + return SDValue(); + SelectionDAG &DAG = DCI.DAG; EVT VT = N->getValueType(0); - if(!DAG.getTargetLoweringInfo().isTypeLegal(VT)) + if (!DAG.getTargetLoweringInfo().isTypeLegal(VT)) return SDValue(); - // Attempt to recognise bitfield-insert operations. - SDValue Res = tryCombineToBFI(N, DCI, Subtarget); + SDValue Res = tryCombineToEXTR(N, DCI); if (Res.getNode()) return Res; - // Attempt to combine an existing MaskedBFI operation into one with a larger - // mask. - Res = tryCombineToLargerBFI(N, DCI, Subtarget); + Res = tryCombineToBSL(N, DCI); if (Res.getNode()) return Res; - Res = tryCombineToEXTR(N, DCI); - if (Res.getNode()) - return Res; + return SDValue(); +} - if (!Subtarget->hasNEON()) +static SDValue performBitcastCombine(SDNode *N, + TargetLowering::DAGCombinerInfo &DCI, + SelectionDAG &DAG) { + // Wait 'til after everything is legalized to try this. That way we have + // legal vector types and such. + if (DCI.isBeforeLegalizeOps()) return SDValue(); - // Attempt to use vector immediate-form BSL - // (or (and B, A), (and C, ~A)) => (VBSL A, B, C) when A is a constant. + // Remove extraneous bitcasts around an extract_subvector. + // For example, + // (v4i16 (bitconvert + // (extract_subvector (v2i64 (bitconvert (v8i16 ...)), (i64 1))))) + // becomes + // (extract_subvector ((v8i16 ...), (i64 4))) - SDValue N0 = N->getOperand(0); - if (N0.getOpcode() != ISD::AND) + // Only interested in 64-bit vectors as the ultimate result. + EVT VT = N->getValueType(0); + if (!VT.isVector()) return SDValue(); - - SDValue N1 = N->getOperand(1); - if (N1.getOpcode() != ISD::AND) + if (VT.getSimpleVT().getSizeInBits() != 64) return SDValue(); - - if (VT.isVector() && DAG.getTargetLoweringInfo().isTypeLegal(VT)) { - APInt SplatUndef; - unsigned SplatBitSize; - bool HasAnyUndefs; - BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(1)); - APInt SplatBits0; - if (BVN0 && BVN0->isConstantSplat(SplatBits0, SplatUndef, SplatBitSize, - HasAnyUndefs) && - !HasAnyUndefs) { - BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(1)); - APInt SplatBits1; - if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize, - HasAnyUndefs) && !HasAnyUndefs && - SplatBits0.getBitWidth() == SplatBits1.getBitWidth() && - SplatBits0 == ~SplatBits1) { - - return DAG.getNode(ISD::VSELECT, DL, VT, N0->getOperand(1), - N0->getOperand(0), N1->getOperand(0)); - } - } + // Is the operand an extract_subvector starting at the beginning or halfway + // point of the vector? 
A low half may also come through as an + // EXTRACT_SUBREG, so look for that, too. + SDValue Op0 = N->getOperand(0); + if (Op0->getOpcode() != ISD::EXTRACT_SUBVECTOR && + !(Op0->isMachineOpcode() && + Op0->getMachineOpcode() == AArch64::EXTRACT_SUBREG)) + return SDValue(); + uint64_t idx = cast<ConstantSDNode>(Op0->getOperand(1))->getZExtValue(); + if (Op0->getOpcode() == ISD::EXTRACT_SUBVECTOR) { + if (Op0->getValueType(0).getVectorNumElements() != idx && idx != 0) + return SDValue(); + } else if (Op0->getMachineOpcode() == AArch64::EXTRACT_SUBREG) { + if (idx != AArch64::dsub) + return SDValue(); + // The dsub reference is equivalent to a lane zero subvector reference. + idx = 0; } + // Look through the bitcast of the input to the extract. + if (Op0->getOperand(0)->getOpcode() != ISD::BITCAST) + return SDValue(); + SDValue Source = Op0->getOperand(0)->getOperand(0); + // If the source type has twice the number of elements as our destination + // type, we know this is an extract of the high or low half of the vector. + EVT SVT = Source->getValueType(0); + if (SVT.getVectorNumElements() != VT.getVectorNumElements() * 2) + return SDValue(); - return SDValue(); + DEBUG(dbgs() << "aarch64-lower: bitcast extract_subvector simplification\n"); + + // Create the simplified form to just extract the low or high half of the + // vector directly rather than bothering with the bitcasts. + SDLoc dl(N); + unsigned NumElements = VT.getVectorNumElements(); + if (idx) { + SDValue HalfIdx = DAG.getConstant(NumElements, MVT::i64); + return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Source, HalfIdx); + } else { + SDValue SubReg = DAG.getTargetConstant(AArch64::dsub, MVT::i32); + return SDValue(DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, dl, VT, + Source, SubReg), + 0); + } } -/// Target-specific dag combine xforms for ISD::SRA -static SDValue PerformSRACombine(SDNode *N, - TargetLowering::DAGCombinerInfo &DCI) { +static SDValue performConcatVectorsCombine(SDNode *N, + TargetLowering::DAGCombinerInfo &DCI, + SelectionDAG &DAG) { + // Wait 'til after everything is legalized to try this. That way we have + // legal vector types and such. + if (DCI.isBeforeLegalizeOps()) + return SDValue(); - SelectionDAG &DAG = DCI.DAG; - SDLoc DL(N); + SDLoc dl(N); EVT VT = N->getValueType(0); - // We're looking for an SRA/SHL pair which form an SBFX. + // If we see a (concat_vectors (v1x64 A), (v1x64 A)) it's really a vector + // splat. The indexed instructions are going to be expecting a DUPLANE64, so + // canonicalise to that. + if (N->getOperand(0) == N->getOperand(1) && VT.getVectorNumElements() == 2) { + assert(VT.getVectorElementType().getSizeInBits() == 64); + return DAG.getNode(AArch64ISD::DUPLANE64, dl, VT, + WidenVector(N->getOperand(0), DAG), + DAG.getConstant(0, MVT::i64)); + } - if (VT != MVT::i32 && VT != MVT::i64) + // Canonicalise concat_vectors so that the right-hand vector has as few + // bit-casts as possible before its real operation. The primary matching + // destination for these operations will be the narrowing "2" instructions, + // which depend on the operation being performed on this right-hand vector. 
+ // For example, + // (concat_vectors LHS, (v1i64 (bitconvert (v4i16 RHS)))) + // becomes + // (bitconvert (concat_vectors (v4i16 (bitconvert LHS)), RHS)) + + SDValue Op1 = N->getOperand(1); + if (Op1->getOpcode() != ISD::BITCAST) return SDValue(); + SDValue RHS = Op1->getOperand(0); + MVT RHSTy = RHS.getValueType().getSimpleVT(); + // If the RHS is not a vector, this is not the pattern we're looking for. + if (!RHSTy.isVector()) + return SDValue(); + + DEBUG(dbgs() << "aarch64-lower: concat_vectors bitcast simplification\n"); + + MVT ConcatTy = MVT::getVectorVT(RHSTy.getVectorElementType(), + RHSTy.getVectorNumElements() * 2); + return DAG.getNode( + ISD::BITCAST, dl, VT, + DAG.getNode(ISD::CONCAT_VECTORS, dl, ConcatTy, + DAG.getNode(ISD::BITCAST, dl, RHSTy, N->getOperand(0)), RHS)); +} - if (!isa<ConstantSDNode>(N->getOperand(1))) +static SDValue tryCombineFixedPointConvert(SDNode *N, + TargetLowering::DAGCombinerInfo &DCI, + SelectionDAG &DAG) { + // Wait 'til after everything is legalized to try this. That way we have + // legal vector types and such. + if (DCI.isBeforeLegalizeOps()) return SDValue(); + // Transform a scalar conversion of a value from a lane extract into a + // lane extract of a vector conversion. E.g., from foo1 to foo2: + // double foo1(int64x2_t a) { return vcvtd_n_f64_s64(a[1], 9); } + // double foo2(int64x2_t a) { return vcvtq_n_f64_s64(a, 9)[1]; } + // + // The second form interacts better with instruction selection and the + // register allocator to avoid cross-class register copies that aren't + // coalescable due to a lane reference. + + // Check the operand and see if it originates from a lane extract. + SDValue Op1 = N->getOperand(1); + if (Op1.getOpcode() == ISD::EXTRACT_VECTOR_ELT) { + // Yep, no additional predication needed. Perform the transform. + SDValue IID = N->getOperand(0); + SDValue Shift = N->getOperand(2); + SDValue Vec = Op1.getOperand(0); + SDValue Lane = Op1.getOperand(1); + EVT ResTy = N->getValueType(0); + EVT VecResTy; + SDLoc DL(N); + + // The vector width should be 128 bits by the time we get here, even + // if it started as 64 bits (the extract_vector handling will have + // done so). + assert(Vec.getValueType().getSizeInBits() == 128 && + "unexpected vector size on extract_vector_elt!"); + if (Vec.getValueType() == MVT::v4i32) + VecResTy = MVT::v4f32; + else if (Vec.getValueType() == MVT::v2i64) + VecResTy = MVT::v2f64; + else + assert(0 && "unexpected vector type!"); - uint64_t ExtraSignBits = N->getConstantOperandVal(1); - SDValue Shift = N->getOperand(0); + SDValue Convert = + DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VecResTy, IID, Vec, Shift); + return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResTy, Convert, Lane); + } + return SDValue(); +} - if (Shift.getOpcode() != ISD::SHL) +// AArch64 high-vector "long" operations are formed by performing the non-high +// version on an extract_subvector of each operand which gets the high half: +// +// (longop2 LHS, RHS) == (longop (extract_high LHS), (extract_high RHS)) +// +// However, there are cases which don't have an extract_high explicitly, but +// have another operation that can be made compatible with one for free. For +// example: +// +// (dupv64 scalar) --> (extract_high (dup128 scalar)) +// +// This routine does the actual conversion of such DUPs, once outer routines +// have determined that everything else is in order. 
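+//
+// As an illustrative case (types chosen purely for the example), a
+// (v4i16 (AArch64ISD::DUP scalar)) would be rebuilt as a v8i16 DUP whose upper
+// four lanes are then taken, i.e.
+// (extract_subvector (v8i16 (AArch64ISD::DUP scalar)), (i64 4)).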
+static SDValue tryExtendDUPToExtractHigh(SDValue N, SelectionDAG &DAG) { + // We can handle most types of duplicate, but the lane ones have an extra + // operand saying *which* lane, so we need to know. + bool IsDUPLANE; + switch (N.getOpcode()) { + case AArch64ISD::DUP: + IsDUPLANE = false; + break; + case AArch64ISD::DUPLANE8: + case AArch64ISD::DUPLANE16: + case AArch64ISD::DUPLANE32: + case AArch64ISD::DUPLANE64: + IsDUPLANE = true; + break; + default: return SDValue(); + } - if (!isa<ConstantSDNode>(Shift->getOperand(1))) + MVT NarrowTy = N.getSimpleValueType(); + if (!NarrowTy.is64BitVector()) return SDValue(); - uint64_t BitsOnLeft = Shift->getConstantOperandVal(1); - uint64_t Width = VT.getSizeInBits() - ExtraSignBits; - uint64_t LSB = VT.getSizeInBits() - Width - BitsOnLeft; + MVT ElementTy = NarrowTy.getVectorElementType(); + unsigned NumElems = NarrowTy.getVectorNumElements(); + MVT NewDUPVT = MVT::getVectorVT(ElementTy, NumElems * 2); - if (LSB > VT.getSizeInBits() || Width > VT.getSizeInBits()) - return SDValue(); + SDValue NewDUP; + if (IsDUPLANE) + NewDUP = DAG.getNode(N.getOpcode(), SDLoc(N), NewDUPVT, N.getOperand(0), + N.getOperand(1)); + else + NewDUP = DAG.getNode(AArch64ISD::DUP, SDLoc(N), NewDUPVT, N.getOperand(0)); - return DAG.getNode(AArch64ISD::SBFX, DL, VT, Shift.getOperand(0), - DAG.getConstant(LSB, MVT::i64), - DAG.getConstant(LSB + Width - 1, MVT::i64)); + return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N.getNode()), NarrowTy, + NewDUP, DAG.getConstant(NumElems, MVT::i64)); } -/// Check if this is a valid build_vector for the immediate operand of -/// a vector shift operation, where all the elements of the build_vector -/// must have the same constant integer value. -static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) { - // Ignore bit_converts. - while (Op.getOpcode() == ISD::BITCAST) - Op = Op.getOperand(0); - BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); - APInt SplatBits, SplatUndef; - unsigned SplatBitSize; - bool HasAnyUndefs; - if (!BVN || !BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, - HasAnyUndefs, ElementBits) || - SplatBitSize > ElementBits) - return false; - Cnt = SplatBits.getSExtValue(); - return true; +static bool isEssentiallyExtractSubvector(SDValue N) { + if (N.getOpcode() == ISD::EXTRACT_SUBVECTOR) + return true; + + return N.getOpcode() == ISD::BITCAST && + N.getOperand(0).getOpcode() == ISD::EXTRACT_SUBVECTOR; } -/// Check if this is a valid build_vector for the immediate operand of -/// a vector shift left operation. That value must be in the range: -/// 0 <= Value < ElementBits -static bool isVShiftLImm(SDValue Op, EVT VT, int64_t &Cnt) { - assert(VT.isVector() && "vector shift count is not a vector type"); - unsigned ElementBits = VT.getVectorElementType().getSizeInBits(); - if (!getVShiftImm(Op, ElementBits, Cnt)) +/// \brief Helper structure to keep track of ISD::SET_CC operands. +struct GenericSetCCInfo { + const SDValue *Opnd0; + const SDValue *Opnd1; + ISD::CondCode CC; +}; + +/// \brief Helper structure to keep track of a SET_CC lowered into AArch64 code. +struct AArch64SetCCInfo { + const SDValue *Cmp; + AArch64CC::CondCode CC; +}; + +/// \brief Helper structure to keep track of SetCC information. +union SetCCInfo { + GenericSetCCInfo Generic; + AArch64SetCCInfo AArch64; +}; + +/// \brief Helper structure to be able to read SetCC information. If set to +/// true, IsAArch64 field, Info is a AArch64SetCCInfo, otherwise Info is a +/// GenericSetCCInfo. 
+struct SetCCInfoAndKind { + SetCCInfo Info; + bool IsAArch64; +}; + +/// \brief Check whether or not \p Op is a SET_CC operation, either a generic or +/// an +/// AArch64 lowered one. +/// \p SetCCInfo is filled accordingly. +/// \post SetCCInfo is meanginfull only when this function returns true. +/// \return True when Op is a kind of SET_CC operation. +static bool isSetCC(SDValue Op, SetCCInfoAndKind &SetCCInfo) { + // If this is a setcc, this is straight forward. + if (Op.getOpcode() == ISD::SETCC) { + SetCCInfo.Info.Generic.Opnd0 = &Op.getOperand(0); + SetCCInfo.Info.Generic.Opnd1 = &Op.getOperand(1); + SetCCInfo.Info.Generic.CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); + SetCCInfo.IsAArch64 = false; + return true; + } + // Otherwise, check if this is a matching csel instruction. + // In other words: + // - csel 1, 0, cc + // - csel 0, 1, !cc + if (Op.getOpcode() != AArch64ISD::CSEL) + return false; + // Set the information about the operands. + // TODO: we want the operands of the Cmp not the csel + SetCCInfo.Info.AArch64.Cmp = &Op.getOperand(3); + SetCCInfo.IsAArch64 = true; + SetCCInfo.Info.AArch64.CC = static_cast<AArch64CC::CondCode>( + cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue()); + + // Check that the operands matches the constraints: + // (1) Both operands must be constants. + // (2) One must be 1 and the other must be 0. + ConstantSDNode *TValue = dyn_cast<ConstantSDNode>(Op.getOperand(0)); + ConstantSDNode *FValue = dyn_cast<ConstantSDNode>(Op.getOperand(1)); + + // Check (1). + if (!TValue || !FValue) return false; - return (Cnt >= 0 && Cnt < ElementBits); + + // Check (2). + if (!TValue->isOne()) { + // Update the comparison when we are interested in !cc. + std::swap(TValue, FValue); + SetCCInfo.Info.AArch64.CC = + AArch64CC::getInvertedCondCode(SetCCInfo.Info.AArch64.CC); + } + return TValue->isOne() && FValue->isNullValue(); } -/// Check if this is a valid build_vector for the immediate operand of a -/// vector shift right operation. The value must be in the range: -/// 1 <= Value <= ElementBits -static bool isVShiftRImm(SDValue Op, EVT VT, int64_t &Cnt) { - assert(VT.isVector() && "vector shift count is not a vector type"); - unsigned ElementBits = VT.getVectorElementType().getSizeInBits(); - if (!getVShiftImm(Op, ElementBits, Cnt)) - return false; - return (Cnt >= 1 && Cnt <= ElementBits); +// Returns true if Op is setcc or zext of setcc. +static bool isSetCCOrZExtSetCC(const SDValue& Op, SetCCInfoAndKind &Info) { + if (isSetCC(Op, Info)) + return true; + return ((Op.getOpcode() == ISD::ZERO_EXTEND) && + isSetCC(Op->getOperand(0), Info)); } -static SDValue GenForSextInreg(SDNode *N, - TargetLowering::DAGCombinerInfo &DCI, - EVT SrcVT, EVT DestVT, EVT SubRegVT, - const int *Mask, SDValue Src) { - SelectionDAG &DAG = DCI.DAG; - SDValue Bitcast - = DAG.getNode(ISD::BITCAST, SDLoc(N), SrcVT, Src); - SDValue Sext - = DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), DestVT, Bitcast); - SDValue ShuffleVec - = DAG.getVectorShuffle(DestVT, SDLoc(N), Sext, DAG.getUNDEF(DestVT), Mask); - SDValue ExtractSubreg - = SDValue(DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, SDLoc(N), - SubRegVT, ShuffleVec, - DAG.getTargetConstant(AArch64::sub_64, MVT::i32)), 0); - return ExtractSubreg; -} - -/// Checks for vector shifts and lowers them. 
-static SDValue PerformShiftCombine(SDNode *N, - TargetLowering::DAGCombinerInfo &DCI, - const AArch64Subtarget *ST) { - SelectionDAG &DAG = DCI.DAG; - EVT VT = N->getValueType(0); - if (N->getOpcode() == ISD::SRA && (VT == MVT::i32 || VT == MVT::i64)) - return PerformSRACombine(N, DCI); +// The folding we want to perform is: +// (add x, [zext] (setcc cc ...) ) +// --> +// (csel x, (add x, 1), !cc ...) +// +// The latter will get matched to a CSINC instruction. +static SDValue performSetccAddFolding(SDNode *Op, SelectionDAG &DAG) { + assert(Op && Op->getOpcode() == ISD::ADD && "Unexpected operation!"); + SDValue LHS = Op->getOperand(0); + SDValue RHS = Op->getOperand(1); + SetCCInfoAndKind InfoAndKind; + + // If neither operand is a SET_CC, give up. + if (!isSetCCOrZExtSetCC(LHS, InfoAndKind)) { + std::swap(LHS, RHS); + if (!isSetCCOrZExtSetCC(LHS, InfoAndKind)) + return SDValue(); + } - // We're looking for an SRA/SHL pair to help generating instruction - // sshll v0.8h, v0.8b, #0 - // The instruction STXL is also the alias of this instruction. - // - // For example, for DAG like below, - // v2i32 = sra (v2i32 (shl v2i32, 16)), 16 - // we can transform it into - // v2i32 = EXTRACT_SUBREG - // (v4i32 (suffle_vector - // (v4i32 (sext (v4i16 (bitcast v2i32))), - // undef, (0, 2, u, u)), - // sub_64 - // - // With this transformation we expect to generate "SSHLL + UZIP1" - // Sometimes UZIP1 can be optimized away by combining with other context. - int64_t ShrCnt, ShlCnt; - if (N->getOpcode() == ISD::SRA - && (VT == MVT::v2i32 || VT == MVT::v4i16) - && isVShiftRImm(N->getOperand(1), VT, ShrCnt) - && N->getOperand(0).getOpcode() == ISD::SHL - && isVShiftRImm(N->getOperand(0).getOperand(1), VT, ShlCnt)) { - SDValue Src = N->getOperand(0).getOperand(0); - if (VT == MVT::v2i32 && ShrCnt == 16 && ShlCnt == 16) { - // sext_inreg(v2i32, v2i16) - // We essentially only care the Mask {0, 2, u, u} - int Mask[4] = {0, 2, 4, 6}; - return GenForSextInreg(N, DCI, MVT::v4i16, MVT::v4i32, MVT::v2i32, - Mask, Src); - } - else if (VT == MVT::v2i32 && ShrCnt == 24 && ShlCnt == 24) { - // sext_inreg(v2i16, v2i8) - // We essentially only care the Mask {0, u, 4, u, u, u, u, u, u, u, u, u} - int Mask[8] = {0, 2, 4, 6, 8, 10, 12, 14}; - return GenForSextInreg(N, DCI, MVT::v8i8, MVT::v8i16, MVT::v2i32, - Mask, Src); - } - else if (VT == MVT::v4i16 && ShrCnt == 8 && ShlCnt == 8) { - // sext_inreg(v4i16, v4i8) - // We essentially only care the Mask {0, 2, 4, 6, u, u, u, u, u, u, u, u} - int Mask[8] = {0, 2, 4, 6, 8, 10, 12, 14}; - return GenForSextInreg(N, DCI, MVT::v8i8, MVT::v8i16, MVT::v4i16, - Mask, Src); - } + // FIXME: This could be generatized to work for FP comparisons. + EVT CmpVT = InfoAndKind.IsAArch64 + ? 
InfoAndKind.Info.AArch64.Cmp->getOperand(0).getValueType() + : InfoAndKind.Info.Generic.Opnd0->getValueType(); + if (CmpVT != MVT::i32 && CmpVT != MVT::i64) + return SDValue(); + + SDValue CCVal; + SDValue Cmp; + SDLoc dl(Op); + if (InfoAndKind.IsAArch64) { + CCVal = DAG.getConstant( + AArch64CC::getInvertedCondCode(InfoAndKind.Info.AArch64.CC), MVT::i32); + Cmp = *InfoAndKind.Info.AArch64.Cmp; + } else + Cmp = getAArch64Cmp(*InfoAndKind.Info.Generic.Opnd0, + *InfoAndKind.Info.Generic.Opnd1, + ISD::getSetCCInverse(InfoAndKind.Info.Generic.CC, true), + CCVal, DAG, dl); + + EVT VT = Op->getValueType(0); + LHS = DAG.getNode(ISD::ADD, dl, VT, RHS, DAG.getConstant(1, VT)); + return DAG.getNode(AArch64ISD::CSEL, dl, VT, RHS, LHS, CCVal, Cmp); +} + +// The basic add/sub long vector instructions have variants with "2" on the end +// which act on the high-half of their inputs. They are normally matched by +// patterns like: +// +// (add (zeroext (extract_high LHS)), +// (zeroext (extract_high RHS))) +// -> uaddl2 vD, vN, vM +// +// However, if one of the extracts is something like a duplicate, this +// instruction can still be used profitably. This function puts the DAG into a +// more appropriate form for those patterns to trigger. +static SDValue performAddSubLongCombine(SDNode *N, + TargetLowering::DAGCombinerInfo &DCI, + SelectionDAG &DAG) { + if (DCI.isBeforeLegalizeOps()) + return SDValue(); + + MVT VT = N->getSimpleValueType(0); + if (!VT.is128BitVector()) { + if (N->getOpcode() == ISD::ADD) + return performSetccAddFolding(N, DAG); + return SDValue(); } - // Nothing to be done for scalar shifts. - const TargetLowering &TLI = DAG.getTargetLoweringInfo(); - if (!VT.isVector() || !TLI.isTypeLegal(VT)) + // Make sure both branches are extended in the same way. + SDValue LHS = N->getOperand(0); + SDValue RHS = N->getOperand(1); + if ((LHS.getOpcode() != ISD::ZERO_EXTEND && + LHS.getOpcode() != ISD::SIGN_EXTEND) || + LHS.getOpcode() != RHS.getOpcode()) return SDValue(); - assert(ST->hasNEON() && "unexpected vector shift"); - int64_t Cnt; + unsigned ExtType = LHS.getOpcode(); - switch (N->getOpcode()) { - default: - llvm_unreachable("unexpected shift opcode"); + // It's not worth doing if at least one of the inputs isn't already an + // extract, but we don't know which it'll be so we have to try both. + if (isEssentiallyExtractSubvector(LHS.getOperand(0))) { + RHS = tryExtendDUPToExtractHigh(RHS.getOperand(0), DAG); + if (!RHS.getNode()) + return SDValue(); - case ISD::SHL: - if (isVShiftLImm(N->getOperand(1), VT, Cnt)) { - SDValue RHS = - DAG.getNode(AArch64ISD::NEON_VDUP, SDLoc(N->getOperand(1)), VT, - DAG.getConstant(Cnt, MVT::i32)); - return DAG.getNode(ISD::SHL, SDLoc(N), VT, N->getOperand(0), RHS); - } - break; + RHS = DAG.getNode(ExtType, SDLoc(N), VT, RHS); + } else if (isEssentiallyExtractSubvector(RHS.getOperand(0))) { + LHS = tryExtendDUPToExtractHigh(LHS.getOperand(0), DAG); + if (!LHS.getNode()) + return SDValue(); - case ISD::SRA: - case ISD::SRL: - if (isVShiftRImm(N->getOperand(1), VT, Cnt)) { - SDValue RHS = - DAG.getNode(AArch64ISD::NEON_VDUP, SDLoc(N->getOperand(1)), VT, - DAG.getConstant(Cnt, MVT::i32)); - return DAG.getNode(N->getOpcode(), SDLoc(N), VT, N->getOperand(0), RHS); - } + LHS = DAG.getNode(ExtType, SDLoc(N), VT, LHS); + } + + return DAG.getNode(N->getOpcode(), SDLoc(N), VT, LHS, RHS); +} + +// Massage DAGs which we can use the high-half "long" operations on into +// something isel will recognize better. E.g. 
+// +// (aarch64_neon_umull (extract_high vec) (dupv64 scalar)) --> +// (aarch64_neon_umull (extract_high (v2i64 vec))) +// (extract_high (v2i64 (dup128 scalar))))) +// +static SDValue tryCombineLongOpWithDup(unsigned IID, SDNode *N, + TargetLowering::DAGCombinerInfo &DCI, + SelectionDAG &DAG) { + if (DCI.isBeforeLegalizeOps()) + return SDValue(); + + SDValue LHS = N->getOperand(1); + SDValue RHS = N->getOperand(2); + assert(LHS.getValueType().is64BitVector() && + RHS.getValueType().is64BitVector() && + "unexpected shape for long operation"); + + // Either node could be a DUP, but it's not worth doing both of them (you'd + // just as well use the non-high version) so look for a corresponding extract + // operation on the other "wing". + if (isEssentiallyExtractSubvector(LHS)) { + RHS = tryExtendDUPToExtractHigh(RHS, DAG); + if (!RHS.getNode()) + return SDValue(); + } else if (isEssentiallyExtractSubvector(RHS)) { + LHS = tryExtendDUPToExtractHigh(LHS, DAG); + if (!LHS.getNode()) + return SDValue(); + } + + return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SDLoc(N), N->getValueType(0), + N->getOperand(0), LHS, RHS); +} + +static SDValue tryCombineShiftImm(unsigned IID, SDNode *N, SelectionDAG &DAG) { + MVT ElemTy = N->getSimpleValueType(0).getScalarType(); + unsigned ElemBits = ElemTy.getSizeInBits(); + + int64_t ShiftAmount; + if (BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(2))) { + APInt SplatValue, SplatUndef; + unsigned SplatBitSize; + bool HasAnyUndefs; + if (!BVN->isConstantSplat(SplatValue, SplatUndef, SplatBitSize, + HasAnyUndefs, ElemBits) || + SplatBitSize != ElemBits) + return SDValue(); + + ShiftAmount = SplatValue.getSExtValue(); + } else if (ConstantSDNode *CVN = dyn_cast<ConstantSDNode>(N->getOperand(2))) { + ShiftAmount = CVN->getSExtValue(); + } else + return SDValue(); + + unsigned Opcode; + bool IsRightShift; + switch (IID) { + default: + llvm_unreachable("Unknown shift intrinsic"); + case Intrinsic::aarch64_neon_sqshl: + Opcode = AArch64ISD::SQSHL_I; + IsRightShift = false; + break; + case Intrinsic::aarch64_neon_uqshl: + Opcode = AArch64ISD::UQSHL_I; + IsRightShift = false; + break; + case Intrinsic::aarch64_neon_srshl: + Opcode = AArch64ISD::SRSHR_I; + IsRightShift = true; + break; + case Intrinsic::aarch64_neon_urshl: + Opcode = AArch64ISD::URSHR_I; + IsRightShift = true; + break; + case Intrinsic::aarch64_neon_sqshlu: + Opcode = AArch64ISD::SQSHLU_I; + IsRightShift = false; break; } + if (IsRightShift && ShiftAmount <= -1 && ShiftAmount >= -(int)ElemBits) + return DAG.getNode(Opcode, SDLoc(N), N->getValueType(0), N->getOperand(1), + DAG.getConstant(-ShiftAmount, MVT::i32)); + else if (!IsRightShift && ShiftAmount >= 0 && ShiftAmount <= ElemBits) + return DAG.getNode(Opcode, SDLoc(N), N->getValueType(0), N->getOperand(1), + DAG.getConstant(ShiftAmount, MVT::i32)); + return SDValue(); } -/// ARM-specific DAG combining for intrinsics. -static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) { - unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); +// The CRC32[BH] instructions ignore the high bits of their data operand. Since +// the intrinsics must be legal and take an i32, this means there's almost +// certainly going to be a zext in the DAG which we can eliminate. 
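+//
+// For instance, for the byte-sized forms the DAG typically looks like
+// (int_aarch64_crc32b acc, (and data, 255)), and since CRC32B only reads the
+// low byte of its data operand the AND can simply be dropped.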
+static SDValue tryCombineCRC32(unsigned Mask, SDNode *N, SelectionDAG &DAG) { + SDValue AndN = N->getOperand(2); + if (AndN.getOpcode() != ISD::AND) + return SDValue(); + + ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(AndN.getOperand(1)); + if (!CMask || CMask->getZExtValue() != Mask) + return SDValue(); + + return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SDLoc(N), MVT::i32, + N->getOperand(0), N->getOperand(1), AndN.getOperand(0)); +} - switch (IntNo) { +static SDValue performIntrinsicCombine(SDNode *N, + TargetLowering::DAGCombinerInfo &DCI, + const AArch64Subtarget *Subtarget) { + SelectionDAG &DAG = DCI.DAG; + unsigned IID = getIntrinsicID(N); + switch (IID) { default: - // Don't do anything for most intrinsics. break; + case Intrinsic::aarch64_neon_vcvtfxs2fp: + case Intrinsic::aarch64_neon_vcvtfxu2fp: + return tryCombineFixedPointConvert(N, DCI, DAG); + break; + case Intrinsic::aarch64_neon_fmax: + return DAG.getNode(AArch64ISD::FMAX, SDLoc(N), N->getValueType(0), + N->getOperand(1), N->getOperand(2)); + case Intrinsic::aarch64_neon_fmin: + return DAG.getNode(AArch64ISD::FMIN, SDLoc(N), N->getValueType(0), + N->getOperand(1), N->getOperand(2)); + case Intrinsic::aarch64_neon_smull: + case Intrinsic::aarch64_neon_umull: + case Intrinsic::aarch64_neon_pmull: + case Intrinsic::aarch64_neon_sqdmull: + return tryCombineLongOpWithDup(IID, N, DCI, DAG); + case Intrinsic::aarch64_neon_sqshl: + case Intrinsic::aarch64_neon_uqshl: + case Intrinsic::aarch64_neon_sqshlu: + case Intrinsic::aarch64_neon_srshl: + case Intrinsic::aarch64_neon_urshl: + return tryCombineShiftImm(IID, N, DAG); + case Intrinsic::aarch64_crc32b: + case Intrinsic::aarch64_crc32cb: + return tryCombineCRC32(0xff, N, DAG); + case Intrinsic::aarch64_crc32h: + case Intrinsic::aarch64_crc32ch: + return tryCombineCRC32(0xffff, N, DAG); + } + return SDValue(); +} - case Intrinsic::arm_neon_vqshifts: - case Intrinsic::arm_neon_vqshiftu: - EVT VT = N->getOperand(1).getValueType(); - int64_t Cnt; - if (!isVShiftLImm(N->getOperand(2), VT, Cnt)) - break; - unsigned VShiftOpc = (IntNo == Intrinsic::arm_neon_vqshifts) - ? AArch64ISD::NEON_QSHLs - : AArch64ISD::NEON_QSHLu; - return DAG.getNode(VShiftOpc, SDLoc(N), N->getValueType(0), - N->getOperand(1), DAG.getConstant(Cnt, MVT::i32)); +static SDValue performExtendCombine(SDNode *N, + TargetLowering::DAGCombinerInfo &DCI, + SelectionDAG &DAG) { + // If we see something like (zext (sabd (extract_high ...), (DUP ...))) then + // we can convert that DUP into another extract_high (of a bigger DUP), which + // helps the backend to decide that an sabdl2 would be useful, saving a real + // extract_high operation. + if (!DCI.isBeforeLegalizeOps() && N->getOpcode() == ISD::ZERO_EXTEND && + N->getOperand(0).getOpcode() == ISD::INTRINSIC_WO_CHAIN) { + SDNode *ABDNode = N->getOperand(0).getNode(); + unsigned IID = getIntrinsicID(ABDNode); + if (IID == Intrinsic::aarch64_neon_sabd || + IID == Intrinsic::aarch64_neon_uabd) { + SDValue NewABD = tryCombineLongOpWithDup(IID, ABDNode, DCI, DAG); + if (!NewABD.getNode()) + return SDValue(); + + return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), N->getValueType(0), + NewABD); + } + } + + // This is effectively a custom type legalization for AArch64. + // + // Type legalization will split an extend of a small, legal, type to a larger + // illegal type by first splitting the destination type, often creating + // illegal source types, which then get legalized in isel-confusing ways, + // leading to really terrible codegen. 
E.g., + // %result = v8i32 sext v8i8 %value + // becomes + // %losrc = extract_subreg %value, ... + // %hisrc = extract_subreg %value, ... + // %lo = v4i32 sext v4i8 %losrc + // %hi = v4i32 sext v4i8 %hisrc + // Things go rapidly downhill from there. + // + // For AArch64, the [sz]ext vector instructions can only go up one element + // size, so we can, e.g., extend from i8 to i16, but to go from i8 to i32 + // take two instructions. + // + // This implies that the most efficient way to do the extend from v8i8 + // to two v4i32 values is to first extend the v8i8 to v8i16, then do + // the normal splitting to happen for the v8i16->v8i32. + + // This is pre-legalization to catch some cases where the default + // type legalization will create ill-tempered code. + if (!DCI.isBeforeLegalizeOps()) + return SDValue(); + + // We're only interested in cleaning things up for non-legal vector types + // here. If both the source and destination are legal, things will just + // work naturally without any fiddling. + const TargetLowering &TLI = DAG.getTargetLoweringInfo(); + EVT ResVT = N->getValueType(0); + if (!ResVT.isVector() || TLI.isTypeLegal(ResVT)) + return SDValue(); + // If the vector type isn't a simple VT, it's beyond the scope of what + // we're worried about here. Let legalization do its thing and hope for + // the best. + if (!ResVT.isSimple()) + return SDValue(); + + SDValue Src = N->getOperand(0); + MVT SrcVT = Src->getValueType(0).getSimpleVT(); + // If the source VT is a 64-bit vector, we can play games and get the + // better results we want. + if (SrcVT.getSizeInBits() != 64) + return SDValue(); + + unsigned SrcEltSize = SrcVT.getVectorElementType().getSizeInBits(); + unsigned ElementCount = SrcVT.getVectorNumElements(); + SrcVT = MVT::getVectorVT(MVT::getIntegerVT(SrcEltSize * 2), ElementCount); + SDLoc DL(N); + Src = DAG.getNode(N->getOpcode(), DL, SrcVT, Src); + + // Now split the rest of the operation into two halves, each with a 64 + // bit source. + EVT LoVT, HiVT; + SDValue Lo, Hi; + unsigned NumElements = ResVT.getVectorNumElements(); + assert(!(NumElements & 1) && "Splitting vector, but not in half!"); + LoVT = HiVT = EVT::getVectorVT(*DAG.getContext(), + ResVT.getVectorElementType(), NumElements / 2); + + EVT InNVT = EVT::getVectorVT(*DAG.getContext(), SrcVT.getVectorElementType(), + LoVT.getVectorNumElements()); + Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InNVT, Src, + DAG.getIntPtrConstant(0)); + Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InNVT, Src, + DAG.getIntPtrConstant(InNVT.getVectorNumElements())); + Lo = DAG.getNode(N->getOpcode(), DL, LoVT, Lo); + Hi = DAG.getNode(N->getOpcode(), DL, HiVT, Hi); + + // Now combine the parts back together so we still have a single result + // like the combiner expects. + return DAG.getNode(ISD::CONCAT_VECTORS, DL, ResVT, Lo, Hi); +} + +/// Replace a splat of a scalar to a vector store by scalar stores of the scalar +/// value. The load store optimizer pass will merge them to store pair stores. +/// This has better performance than a splat of the scalar followed by a split +/// vector store. Even if the stores are not merged it is four stores vs a dup, +/// followed by an ext.b and two stores. +static SDValue replaceSplatVectorStore(SelectionDAG &DAG, StoreSDNode *St) { + SDValue StVal = St->getValue(); + EVT VT = StVal.getValueType(); + + // Don't replace floating point stores, they possibly won't be transformed to + // stp because of the store pair suppress pass. 
+ if (VT.isFloatingPoint()) + return SDValue(); + + // Check for insert vector elements. + if (StVal.getOpcode() != ISD::INSERT_VECTOR_ELT) + return SDValue(); + + // We can express a splat as store pair(s) for 2 or 4 elements. + unsigned NumVecElts = VT.getVectorNumElements(); + if (NumVecElts != 4 && NumVecElts != 2) + return SDValue(); + SDValue SplatVal = StVal.getOperand(1); + unsigned RemainInsertElts = NumVecElts - 1; + + // Check that this is a splat. + while (--RemainInsertElts) { + SDValue NextInsertElt = StVal.getOperand(0); + if (NextInsertElt.getOpcode() != ISD::INSERT_VECTOR_ELT) + return SDValue(); + if (NextInsertElt.getOperand(1) != SplatVal) + return SDValue(); + StVal = NextInsertElt; + } + unsigned OrigAlignment = St->getAlignment(); + unsigned EltOffset = NumVecElts == 4 ? 4 : 8; + unsigned Alignment = std::min(OrigAlignment, EltOffset); + + // Create scalar stores. This is at least as good as the code sequence for a + // split unaligned store wich is a dup.s, ext.b, and two stores. + // Most of the time the three stores should be replaced by store pair + // instructions (stp). + SDLoc DL(St); + SDValue BasePtr = St->getBasePtr(); + SDValue NewST1 = + DAG.getStore(St->getChain(), DL, SplatVal, BasePtr, St->getPointerInfo(), + St->isVolatile(), St->isNonTemporal(), St->getAlignment()); + + unsigned Offset = EltOffset; + while (--NumVecElts) { + SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i64, BasePtr, + DAG.getConstant(Offset, MVT::i64)); + NewST1 = DAG.getStore(NewST1.getValue(0), DL, SplatVal, OffsetPtr, + St->getPointerInfo(), St->isVolatile(), + St->isNonTemporal(), Alignment); + Offset += EltOffset; + } + return NewST1; +} + +static SDValue performSTORECombine(SDNode *N, + TargetLowering::DAGCombinerInfo &DCI, + SelectionDAG &DAG, + const AArch64Subtarget *Subtarget) { + if (!DCI.isBeforeLegalize()) + return SDValue(); + + StoreSDNode *S = cast<StoreSDNode>(N); + if (S->isVolatile()) + return SDValue(); + + // Cyclone has bad performance on unaligned 16B stores when crossing line and + // page boundries. We want to split such stores. + if (!Subtarget->isCyclone()) + return SDValue(); + + // Don't split at Oz. + MachineFunction &MF = DAG.getMachineFunction(); + bool IsMinSize = MF.getFunction()->getAttributes().hasAttribute( + AttributeSet::FunctionIndex, Attribute::MinSize); + if (IsMinSize) + return SDValue(); + + SDValue StVal = S->getValue(); + EVT VT = StVal.getValueType(); + + // Don't split v2i64 vectors. Memcpy lowering produces those and splitting + // those up regresses performance on micro-benchmarks and olden/bh. + if (!VT.isVector() || VT.getVectorNumElements() < 2 || VT == MVT::v2i64) + return SDValue(); + + // Split unaligned 16B stores. They are terrible for performance. + // Don't split stores with alignment of 1 or 2. Code that uses clang vector + // extensions can use this to mark that it does not want splitting to happen + // (by underspecifying alignment to be 1 or 2). Furthermore, the chance of + // eliminating alignment hazards is only 1 in 8 for alignment of 2. + if (VT.getSizeInBits() != 128 || S->getAlignment() >= 16 || + S->getAlignment() <= 2) + return SDValue(); + + // If we get a splat of a scalar convert this vector store to a store of + // scalars. They will be merged into store pairs thereby removing two + // instructions. 
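+  // For a hypothetical v4i32 splat of a GPR value, this turns one 16B vector
+  // store into four 32-bit scalar stores at offsets 0, 4, 8 and 12, which the
+  // load/store optimizer is then expected to pair into two stp instructions.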
+ SDValue ReplacedSplat = replaceSplatVectorStore(DAG, S); + if (ReplacedSplat != SDValue()) + return ReplacedSplat; + + SDLoc DL(S); + unsigned NumElts = VT.getVectorNumElements() / 2; + // Split VT into two. + EVT HalfVT = + EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(), NumElts); + SDValue SubVector0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, StVal, + DAG.getIntPtrConstant(0)); + SDValue SubVector1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, StVal, + DAG.getIntPtrConstant(NumElts)); + SDValue BasePtr = S->getBasePtr(); + SDValue NewST1 = + DAG.getStore(S->getChain(), DL, SubVector0, BasePtr, S->getPointerInfo(), + S->isVolatile(), S->isNonTemporal(), S->getAlignment()); + SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i64, BasePtr, + DAG.getConstant(8, MVT::i64)); + return DAG.getStore(NewST1.getValue(0), DL, SubVector1, OffsetPtr, + S->getPointerInfo(), S->isVolatile(), S->isNonTemporal(), + S->getAlignment()); +} + +/// Target-specific DAG combine function for post-increment LD1 (lane) and +/// post-increment LD1R. +static SDValue performPostLD1Combine(SDNode *N, + TargetLowering::DAGCombinerInfo &DCI, + bool IsLaneOp) { + if (DCI.isBeforeLegalizeOps()) + return SDValue(); + + SelectionDAG &DAG = DCI.DAG; + EVT VT = N->getValueType(0); + + unsigned LoadIdx = IsLaneOp ? 1 : 0; + SDNode *LD = N->getOperand(LoadIdx).getNode(); + // If it is not LOAD, can not do such combine. + if (LD->getOpcode() != ISD::LOAD) + return SDValue(); + + LoadSDNode *LoadSDN = cast<LoadSDNode>(LD); + EVT MemVT = LoadSDN->getMemoryVT(); + // Check if memory operand is the same type as the vector element. + if (MemVT != VT.getVectorElementType()) + return SDValue(); + + // Check if there are other uses. If so, do not combine as it will introduce + // an extra load. + for (SDNode::use_iterator UI = LD->use_begin(), UE = LD->use_end(); UI != UE; + ++UI) { + if (UI.getUse().getResNo() == 1) // Ignore uses of the chain result. + continue; + if (*UI != N) + return SDValue(); } + SDValue Addr = LD->getOperand(1); + SDValue Vector = N->getOperand(0); + // Search for a use of the address operand that is an increment. + for (SDNode::use_iterator UI = Addr.getNode()->use_begin(), UE = + Addr.getNode()->use_end(); UI != UE; ++UI) { + SDNode *User = *UI; + if (User->getOpcode() != ISD::ADD + || UI.getUse().getResNo() != Addr.getResNo()) + continue; + + // Check that the add is independent of the load. Otherwise, folding it + // would create a cycle. + if (User->isPredecessorOf(LD) || LD->isPredecessorOf(User)) + continue; + // Also check that add is not used in the vector operand. This would also + // create a cycle. + if (User->isPredecessorOf(Vector.getNode())) + continue; + + // If the increment is a constant, it must match the memory ref size. + SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0); + if (ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode())) { + uint32_t IncVal = CInc->getZExtValue(); + unsigned NumBytes = VT.getScalarSizeInBits() / 8; + if (IncVal != NumBytes) + continue; + Inc = DAG.getRegister(AArch64::XZR, MVT::i64); + } + + SmallVector<SDValue, 8> Ops; + Ops.push_back(LD->getOperand(0)); // Chain + if (IsLaneOp) { + Ops.push_back(Vector); // The vector to be inserted + Ops.push_back(N->getOperand(2)); // The lane to be inserted in the vector + } + Ops.push_back(Addr); + Ops.push_back(Inc); + + EVT Tys[3] = { VT, MVT::i64, MVT::Other }; + SDVTList SDTys = DAG.getVTList(ArrayRef<EVT>(Tys, 3)); + unsigned NewOp = IsLaneOp ? 
AArch64ISD::LD1LANEpost : AArch64ISD::LD1DUPpost; + SDValue UpdN = DAG.getMemIntrinsicNode(NewOp, SDLoc(N), SDTys, Ops, + MemVT, + LoadSDN->getMemOperand()); + + // Update the uses. + std::vector<SDValue> NewResults; + NewResults.push_back(SDValue(LD, 0)); // The result of load + NewResults.push_back(SDValue(UpdN.getNode(), 2)); // Chain + DCI.CombineTo(LD, NewResults); + DCI.CombineTo(N, SDValue(UpdN.getNode(), 0)); // Dup/Inserted Result + DCI.CombineTo(User, SDValue(UpdN.getNode(), 1)); // Write back register + + break; + } return SDValue(); } /// Target-specific DAG combine function for NEON load/store intrinsics /// to merge base address updates. -static SDValue CombineBaseUpdate(SDNode *N, - TargetLowering::DAGCombinerInfo &DCI) { +static SDValue performNEONPostLDSTCombine(SDNode *N, + TargetLowering::DAGCombinerInfo &DCI, + SelectionDAG &DAG) { if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) return SDValue(); - SelectionDAG &DAG = DCI.DAG; - bool isIntrinsic = (N->getOpcode() == ISD::INTRINSIC_VOID || - N->getOpcode() == ISD::INTRINSIC_W_CHAIN); - unsigned AddrOpIdx = (isIntrinsic ? 2 : 1); + unsigned AddrOpIdx = N->getNumOperands() - 1; SDValue Addr = N->getOperand(AddrOpIdx); // Search for a use of the address operand that is an increment. @@ -4090,106 +7423,96 @@ static SDValue CombineBaseUpdate(SDNode *N, continue; // Find the new opcode for the updating load/store. - bool isLoad = true; - bool isLaneOp = false; + bool IsStore = false; + bool IsLaneOp = false; + bool IsDupOp = false; unsigned NewOpc = 0; unsigned NumVecs = 0; - if (isIntrinsic) { - unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); - switch (IntNo) { - default: llvm_unreachable("unexpected intrinsic for Neon base update"); - case Intrinsic::arm_neon_vld1: NewOpc = AArch64ISD::NEON_LD1_UPD; - NumVecs = 1; break; - case Intrinsic::arm_neon_vld2: NewOpc = AArch64ISD::NEON_LD2_UPD; - NumVecs = 2; break; - case Intrinsic::arm_neon_vld3: NewOpc = AArch64ISD::NEON_LD3_UPD; - NumVecs = 3; break; - case Intrinsic::arm_neon_vld4: NewOpc = AArch64ISD::NEON_LD4_UPD; - NumVecs = 4; break; - case Intrinsic::arm_neon_vst1: NewOpc = AArch64ISD::NEON_ST1_UPD; - NumVecs = 1; isLoad = false; break; - case Intrinsic::arm_neon_vst2: NewOpc = AArch64ISD::NEON_ST2_UPD; - NumVecs = 2; isLoad = false; break; - case Intrinsic::arm_neon_vst3: NewOpc = AArch64ISD::NEON_ST3_UPD; - NumVecs = 3; isLoad = false; break; - case Intrinsic::arm_neon_vst4: NewOpc = AArch64ISD::NEON_ST4_UPD; - NumVecs = 4; isLoad = false; break; - case Intrinsic::aarch64_neon_vld1x2: NewOpc = AArch64ISD::NEON_LD1x2_UPD; - NumVecs = 2; break; - case Intrinsic::aarch64_neon_vld1x3: NewOpc = AArch64ISD::NEON_LD1x3_UPD; - NumVecs = 3; break; - case Intrinsic::aarch64_neon_vld1x4: NewOpc = AArch64ISD::NEON_LD1x4_UPD; - NumVecs = 4; break; - case Intrinsic::aarch64_neon_vst1x2: NewOpc = AArch64ISD::NEON_ST1x2_UPD; - NumVecs = 2; isLoad = false; break; - case Intrinsic::aarch64_neon_vst1x3: NewOpc = AArch64ISD::NEON_ST1x3_UPD; - NumVecs = 3; isLoad = false; break; - case Intrinsic::aarch64_neon_vst1x4: NewOpc = AArch64ISD::NEON_ST1x4_UPD; - NumVecs = 4; isLoad = false; break; - case Intrinsic::arm_neon_vld2lane: NewOpc = AArch64ISD::NEON_LD2LN_UPD; - NumVecs = 2; isLaneOp = true; break; - case Intrinsic::arm_neon_vld3lane: NewOpc = AArch64ISD::NEON_LD3LN_UPD; - NumVecs = 3; isLaneOp = true; break; - case Intrinsic::arm_neon_vld4lane: NewOpc = AArch64ISD::NEON_LD4LN_UPD; - NumVecs = 4; isLaneOp = true; break; - case 
Intrinsic::arm_neon_vst2lane: NewOpc = AArch64ISD::NEON_ST2LN_UPD; - NumVecs = 2; isLoad = false; isLaneOp = true; break; - case Intrinsic::arm_neon_vst3lane: NewOpc = AArch64ISD::NEON_ST3LN_UPD; - NumVecs = 3; isLoad = false; isLaneOp = true; break; - case Intrinsic::arm_neon_vst4lane: NewOpc = AArch64ISD::NEON_ST4LN_UPD; - NumVecs = 4; isLoad = false; isLaneOp = true; break; - } - } else { - isLaneOp = true; - switch (N->getOpcode()) { - default: llvm_unreachable("unexpected opcode for Neon base update"); - case AArch64ISD::NEON_LD2DUP: NewOpc = AArch64ISD::NEON_LD2DUP_UPD; - NumVecs = 2; break; - case AArch64ISD::NEON_LD3DUP: NewOpc = AArch64ISD::NEON_LD3DUP_UPD; - NumVecs = 3; break; - case AArch64ISD::NEON_LD4DUP: NewOpc = AArch64ISD::NEON_LD4DUP_UPD; - NumVecs = 4; break; - } + unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); + switch (IntNo) { + default: llvm_unreachable("unexpected intrinsic for Neon base update"); + case Intrinsic::aarch64_neon_ld2: NewOpc = AArch64ISD::LD2post; + NumVecs = 2; break; + case Intrinsic::aarch64_neon_ld3: NewOpc = AArch64ISD::LD3post; + NumVecs = 3; break; + case Intrinsic::aarch64_neon_ld4: NewOpc = AArch64ISD::LD4post; + NumVecs = 4; break; + case Intrinsic::aarch64_neon_st2: NewOpc = AArch64ISD::ST2post; + NumVecs = 2; IsStore = true; break; + case Intrinsic::aarch64_neon_st3: NewOpc = AArch64ISD::ST3post; + NumVecs = 3; IsStore = true; break; + case Intrinsic::aarch64_neon_st4: NewOpc = AArch64ISD::ST4post; + NumVecs = 4; IsStore = true; break; + case Intrinsic::aarch64_neon_ld1x2: NewOpc = AArch64ISD::LD1x2post; + NumVecs = 2; break; + case Intrinsic::aarch64_neon_ld1x3: NewOpc = AArch64ISD::LD1x3post; + NumVecs = 3; break; + case Intrinsic::aarch64_neon_ld1x4: NewOpc = AArch64ISD::LD1x4post; + NumVecs = 4; break; + case Intrinsic::aarch64_neon_st1x2: NewOpc = AArch64ISD::ST1x2post; + NumVecs = 2; IsStore = true; break; + case Intrinsic::aarch64_neon_st1x3: NewOpc = AArch64ISD::ST1x3post; + NumVecs = 3; IsStore = true; break; + case Intrinsic::aarch64_neon_st1x4: NewOpc = AArch64ISD::ST1x4post; + NumVecs = 4; IsStore = true; break; + case Intrinsic::aarch64_neon_ld2r: NewOpc = AArch64ISD::LD2DUPpost; + NumVecs = 2; IsDupOp = true; break; + case Intrinsic::aarch64_neon_ld3r: NewOpc = AArch64ISD::LD3DUPpost; + NumVecs = 3; IsDupOp = true; break; + case Intrinsic::aarch64_neon_ld4r: NewOpc = AArch64ISD::LD4DUPpost; + NumVecs = 4; IsDupOp = true; break; + case Intrinsic::aarch64_neon_ld2lane: NewOpc = AArch64ISD::LD2LANEpost; + NumVecs = 2; IsLaneOp = true; break; + case Intrinsic::aarch64_neon_ld3lane: NewOpc = AArch64ISD::LD3LANEpost; + NumVecs = 3; IsLaneOp = true; break; + case Intrinsic::aarch64_neon_ld4lane: NewOpc = AArch64ISD::LD4LANEpost; + NumVecs = 4; IsLaneOp = true; break; + case Intrinsic::aarch64_neon_st2lane: NewOpc = AArch64ISD::ST2LANEpost; + NumVecs = 2; IsStore = true; IsLaneOp = true; break; + case Intrinsic::aarch64_neon_st3lane: NewOpc = AArch64ISD::ST3LANEpost; + NumVecs = 3; IsStore = true; IsLaneOp = true; break; + case Intrinsic::aarch64_neon_st4lane: NewOpc = AArch64ISD::ST4LANEpost; + NumVecs = 4; IsStore = true; IsLaneOp = true; break; } - // Find the size of memory referenced by the load/store. 
EVT VecTy; - if (isLoad) - VecTy = N->getValueType(0); + if (IsStore) + VecTy = N->getOperand(2).getValueType(); else - VecTy = N->getOperand(AddrOpIdx + 1).getValueType(); - unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8; - if (isLaneOp) - NumBytes /= VecTy.getVectorNumElements(); + VecTy = N->getValueType(0); // If the increment is a constant, it must match the memory ref size. SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0); if (ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode())) { uint32_t IncVal = CInc->getZExtValue(); + unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8; + if (IsLaneOp || IsDupOp) + NumBytes /= VecTy.getVectorNumElements(); if (IncVal != NumBytes) continue; - Inc = DAG.getTargetConstant(IncVal, MVT::i32); + Inc = DAG.getRegister(AArch64::XZR, MVT::i64); } + SmallVector<SDValue, 8> Ops; + Ops.push_back(N->getOperand(0)); // Incoming chain + // Load lane and store have vector list as input. + if (IsLaneOp || IsStore) + for (unsigned i = 2; i < AddrOpIdx; ++i) + Ops.push_back(N->getOperand(i)); + Ops.push_back(Addr); // Base register + Ops.push_back(Inc); - // Create the new updating load/store node. + // Return Types. EVT Tys[6]; - unsigned NumResultVecs = (isLoad ? NumVecs : 0); + unsigned NumResultVecs = (IsStore ? 0 : NumVecs); unsigned n; for (n = 0; n < NumResultVecs; ++n) Tys[n] = VecTy; - Tys[n++] = MVT::i64; - Tys[n] = MVT::Other; - SDVTList SDTys = DAG.getVTList(Tys, NumResultVecs + 2); - SmallVector<SDValue, 8> Ops; - Ops.push_back(N->getOperand(0)); // incoming chain - Ops.push_back(N->getOperand(AddrOpIdx)); - Ops.push_back(Inc); - for (unsigned i = AddrOpIdx + 1; i < N->getNumOperands(); ++i) { - Ops.push_back(N->getOperand(i)); - } + Tys[n++] = MVT::i64; // Type of write back register + Tys[n] = MVT::Other; // Type of the chain + SDVTList SDTys = DAG.getVTList(ArrayRef<EVT>(Tys, NumResultVecs + 2)); + MemIntrinsicSDNode *MemInt = cast<MemIntrinsicSDNode>(N); - SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, SDLoc(N), SDTys, - Ops.data(), Ops.size(), + SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, SDLoc(N), SDTys, Ops, MemInt->getMemoryVT(), MemInt->getMemOperand()); @@ -4198,7 +7521,7 @@ static SDValue CombineBaseUpdate(SDNode *N, for (unsigned i = 0; i < NumResultVecs; ++i) { NewResults.push_back(SDValue(UpdN.getNode(), i)); } - NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs + 1)); // chain + NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs + 1)); DCI.CombineTo(N, NewResults); DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs)); @@ -4207,107 +7530,58 @@ static SDValue CombineBaseUpdate(SDNode *N, return SDValue(); } -/// For a VDUPLANE node N, check if its source operand is a vldN-lane (N > 1) -/// intrinsic, and if all the other uses of that intrinsic are also VDUPLANEs. -/// If so, combine them to a vldN-dup operation and return true. -static SDValue CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { - SelectionDAG &DAG = DCI.DAG; - EVT VT = N->getValueType(0); - - // Check if the VDUPLANE operand is a vldN-dup intrinsic. - SDNode *VLD = N->getOperand(0).getNode(); - if (VLD->getOpcode() != ISD::INTRINSIC_W_CHAIN) +// Optimize compare with zero and branch. 
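+// For example, a branch on EQ whose flags come from (SUBS x, 0) can instead be
+// emitted as a single "cbz x, <dest>", and the NE form as "cbnz", provided the
+// subtract result is otherwise unused and this branch is the only user of the
+// flags.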
+static SDValue performBRCONDCombine(SDNode *N, + TargetLowering::DAGCombinerInfo &DCI, + SelectionDAG &DAG) { + SDValue Chain = N->getOperand(0); + SDValue Dest = N->getOperand(1); + SDValue CCVal = N->getOperand(2); + SDValue Cmp = N->getOperand(3); + + assert(isa<ConstantSDNode>(CCVal) && "Expected a ConstantSDNode here!"); + unsigned CC = cast<ConstantSDNode>(CCVal)->getZExtValue(); + if (CC != AArch64CC::EQ && CC != AArch64CC::NE) return SDValue(); - unsigned NumVecs = 0; - unsigned NewOpc = 0; - unsigned IntNo = cast<ConstantSDNode>(VLD->getOperand(1))->getZExtValue(); - if (IntNo == Intrinsic::arm_neon_vld2lane) { - NumVecs = 2; - NewOpc = AArch64ISD::NEON_LD2DUP; - } else if (IntNo == Intrinsic::arm_neon_vld3lane) { - NumVecs = 3; - NewOpc = AArch64ISD::NEON_LD3DUP; - } else if (IntNo == Intrinsic::arm_neon_vld4lane) { - NumVecs = 4; - NewOpc = AArch64ISD::NEON_LD4DUP; - } else { + + unsigned CmpOpc = Cmp.getOpcode(); + if (CmpOpc != AArch64ISD::ADDS && CmpOpc != AArch64ISD::SUBS) return SDValue(); - } - // First check that all the vldN-lane uses are VDUPLANEs and that the lane - // numbers match the load. - unsigned VLDLaneNo = - cast<ConstantSDNode>(VLD->getOperand(NumVecs + 3))->getZExtValue(); - for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end(); - UI != UE; ++UI) { - // Ignore uses of the chain result. - if (UI.getUse().getResNo() == NumVecs) - continue; - SDNode *User = *UI; - if (User->getOpcode() != AArch64ISD::NEON_VDUPLANE || - VLDLaneNo != cast<ConstantSDNode>(User->getOperand(1))->getZExtValue()) - return SDValue(); - } + // Only attempt folding if there is only one use of the flag and no use of the + // value. + if (!Cmp->hasNUsesOfValue(0, 0) || !Cmp->hasNUsesOfValue(1, 1)) + return SDValue(); - // Create the vldN-dup node. - EVT Tys[5]; - unsigned n; - for (n = 0; n < NumVecs; ++n) - Tys[n] = VT; - Tys[n] = MVT::Other; - SDVTList SDTys = DAG.getVTList(Tys, NumVecs + 1); - SDValue Ops[] = { VLD->getOperand(0), VLD->getOperand(2) }; - MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(VLD); - SDValue VLDDup = DAG.getMemIntrinsicNode(NewOpc, SDLoc(VLD), SDTys, Ops, 2, - VLDMemInt->getMemoryVT(), - VLDMemInt->getMemOperand()); - - // Update the uses. - for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end(); - UI != UE; ++UI) { - unsigned ResNo = UI.getUse().getResNo(); - // Ignore uses of the chain result. - if (ResNo == NumVecs) - continue; - SDNode *User = *UI; - DCI.CombineTo(User, SDValue(VLDDup.getNode(), ResNo)); - } + SDValue LHS = Cmp.getOperand(0); + SDValue RHS = Cmp.getOperand(1); - // Now the vldN-lane intrinsic is dead except for its chain result. - // Update uses of the chain. - std::vector<SDValue> VLDDupResults; - for (unsigned n = 0; n < NumVecs; ++n) - VLDDupResults.push_back(SDValue(VLDDup.getNode(), n)); - VLDDupResults.push_back(SDValue(VLDDup.getNode(), NumVecs)); - DCI.CombineTo(VLD, VLDDupResults); + assert(LHS.getValueType() == RHS.getValueType() && + "Expected the value type to be the same for both operands!"); + if (LHS.getValueType() != MVT::i32 && LHS.getValueType() != MVT::i64) + return SDValue(); - return SDValue(N, 0); -} + if (isa<ConstantSDNode>(LHS) && cast<ConstantSDNode>(LHS)->isNullValue()) + std::swap(LHS, RHS); -// v1i1 setcc -> -// v1i1 (bitcast (i1 setcc (extract_vector_elt, extract_vector_elt)) -// FIXME: Currently the type legalizer can't handle SETCC having v1i1 as result. -// If it can legalize "v1i1 SETCC" correctly, no need to combine such SETCC. 
-static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG) { - EVT ResVT = N->getValueType(0); + if (!isa<ConstantSDNode>(RHS) || !cast<ConstantSDNode>(RHS)->isNullValue()) + return SDValue(); - if (!ResVT.isVector() || ResVT.getVectorNumElements() != 1 || - ResVT.getVectorElementType() != MVT::i1) + if (LHS.getOpcode() == ISD::SHL || LHS.getOpcode() == ISD::SRA || + LHS.getOpcode() == ISD::SRL) return SDValue(); - SDValue LHS = N->getOperand(0); - SDValue RHS = N->getOperand(1); - EVT CmpVT = LHS.getValueType(); - LHS = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N), - CmpVT.getVectorElementType(), LHS, - DAG.getConstant(0, MVT::i64)); - RHS = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N), - CmpVT.getVectorElementType(), RHS, - DAG.getConstant(0, MVT::i64)); - SDValue SetCC = - DAG.getSetCC(SDLoc(N), MVT::i1, LHS, RHS, - cast<CondCodeSDNode>(N->getOperand(2))->get()); - return DAG.getNode(ISD::BITCAST, SDLoc(N), ResVT, SetCC); + // Fold the compare into the branch instruction. + SDValue BR; + if (CC == AArch64CC::EQ) + BR = DAG.getNode(AArch64ISD::CBZ, SDLoc(N), MVT::Other, Chain, LHS, Dest); + else + BR = DAG.getNode(AArch64ISD::CBNZ, SDLoc(N), MVT::Other, Chain, LHS, Dest); + + // Do not add new nodes to DAG combiner worklist. + DCI.CombineTo(N, BR, false); + + return SDValue(); } // vselect (v1i1 setcc) -> @@ -4315,7 +7589,7 @@ static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG) { // FIXME: Currently the type legalizer can't handle VSELECT having v1i1 as // condition. If it can legalize "VSELECT v1i1" correctly, no need to combine // such VSELECT. -static SDValue PerformVSelectCombine(SDNode *N, SelectionDAG &DAG) { +static SDValue performVSelectCombine(SDNode *N, SelectionDAG &DAG) { SDValue N0 = N->getOperand(0); EVT CCVT = N0.getValueType(); @@ -4340,79 +7614,109 @@ static SDValue PerformVSelectCombine(SDNode *N, SelectionDAG &DAG) { IfTrue, IfFalse); } -// sign_extend (extract_vector_elt (v1i1 setcc)) -> -// extract_vector_elt (v1iXX setcc) -// (XX is the size of the compared operand type) -static SDValue PerformSignExtendCombine(SDNode *N, SelectionDAG &DAG) { +/// A vector select: "(select vL, vR, (setcc LHS, RHS))" is best performed with +/// the compare-mask instructions rather than going via NZCV, even if LHS and +/// RHS are really scalar. This replaces any scalar setcc in the above pattern +/// with a vector one followed by a DUP shuffle on the result. +static SDValue performSelectCombine(SDNode *N, SelectionDAG &DAG) { SDValue N0 = N->getOperand(0); - SDValue Vec = N0.getOperand(0); + EVT ResVT = N->getValueType(0); - if (N0.getOpcode() != ISD::EXTRACT_VECTOR_ELT || - Vec.getOpcode() != ISD::SETCC) + if (!N->getOperand(1).getValueType().isVector()) return SDValue(); - EVT ResVT = N->getValueType(0); - EVT CmpVT = Vec.getOperand(0).getValueType(); - // Only optimize when the result type is of the same size as the element - // type of the compared operand. 
- if (ResVT.getSizeInBits() != CmpVT.getVectorElementType().getSizeInBits()) + if (N0.getOpcode() != ISD::SETCC || N0.getValueType() != MVT::i1) return SDValue(); - SDValue Lane = N0.getOperand(1); - SDValue SetCC = - DAG.getSetCC(SDLoc(N), CmpVT.changeVectorElementTypeToInteger(), - Vec.getOperand(0), Vec.getOperand(1), - cast<CondCodeSDNode>(Vec.getOperand(2))->get()); - return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N), ResVT, - SetCC, Lane); + SDLoc DL(N0); + + EVT SrcVT = N0.getOperand(0).getValueType(); + SrcVT = EVT::getVectorVT(*DAG.getContext(), SrcVT, + ResVT.getSizeInBits() / SrcVT.getSizeInBits()); + EVT CCVT = SrcVT.changeVectorElementTypeToInteger(); + + // First perform a vector comparison, where lane 0 is the one we're interested + // in. + SDValue LHS = + DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, SrcVT, N0.getOperand(0)); + SDValue RHS = + DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, SrcVT, N0.getOperand(1)); + SDValue SetCC = DAG.getNode(ISD::SETCC, DL, CCVT, LHS, RHS, N0.getOperand(2)); + + // Now duplicate the comparison mask we want across all other lanes. + SmallVector<int, 8> DUPMask(CCVT.getVectorNumElements(), 0); + SDValue Mask = DAG.getVectorShuffle(CCVT, DL, SetCC, SetCC, DUPMask.data()); + Mask = DAG.getNode(ISD::BITCAST, DL, ResVT.changeVectorElementTypeToInteger(), + Mask); + + return DAG.getSelect(DL, ResVT, Mask, N->getOperand(1), N->getOperand(2)); } -SDValue -AArch64TargetLowering::PerformDAGCombine(SDNode *N, - DAGCombinerInfo &DCI) const { +SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N, + DAGCombinerInfo &DCI) const { + SelectionDAG &DAG = DCI.DAG; switch (N->getOpcode()) { - default: break; - case ISD::AND: return PerformANDCombine(N, DCI); - case ISD::OR: return PerformORCombine(N, DCI, getSubtarget()); - case ISD::SHL: - case ISD::SRA: - case ISD::SRL: - return PerformShiftCombine(N, DCI, getSubtarget()); - case ISD::SETCC: return PerformSETCCCombine(N, DCI.DAG); - case ISD::VSELECT: return PerformVSelectCombine(N, DCI.DAG); - case ISD::SIGN_EXTEND: return PerformSignExtendCombine(N, DCI.DAG); + default: + break; + case ISD::ADD: + case ISD::SUB: + return performAddSubLongCombine(N, DCI, DAG); + case ISD::XOR: + return performXorCombine(N, DAG, DCI, Subtarget); + case ISD::MUL: + return performMulCombine(N, DAG, DCI, Subtarget); + case ISD::SINT_TO_FP: + case ISD::UINT_TO_FP: + return performIntToFpCombine(N, DAG); + case ISD::OR: + return performORCombine(N, DCI, Subtarget); case ISD::INTRINSIC_WO_CHAIN: - return PerformIntrinsicCombine(N, DCI.DAG); - case AArch64ISD::NEON_VDUPLANE: - return CombineVLDDUP(N, DCI); - case AArch64ISD::NEON_LD2DUP: - case AArch64ISD::NEON_LD3DUP: - case AArch64ISD::NEON_LD4DUP: - return CombineBaseUpdate(N, DCI); + return performIntrinsicCombine(N, DCI, Subtarget); + case ISD::ANY_EXTEND: + case ISD::ZERO_EXTEND: + case ISD::SIGN_EXTEND: + return performExtendCombine(N, DCI, DAG); + case ISD::BITCAST: + return performBitcastCombine(N, DCI, DAG); + case ISD::CONCAT_VECTORS: + return performConcatVectorsCombine(N, DCI, DAG); + case ISD::SELECT: + return performSelectCombine(N, DAG); + case ISD::VSELECT: + return performVSelectCombine(N, DCI.DAG); + case ISD::STORE: + return performSTORECombine(N, DCI, DAG, Subtarget); + case AArch64ISD::BRCOND: + return performBRCONDCombine(N, DCI, DAG); + case AArch64ISD::DUP: + return performPostLD1Combine(N, DCI, false); + case ISD::INSERT_VECTOR_ELT: + return performPostLD1Combine(N, DCI, true); case ISD::INTRINSIC_VOID: case ISD::INTRINSIC_W_CHAIN: switch 
(cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { - case Intrinsic::arm_neon_vld1: - case Intrinsic::arm_neon_vld2: - case Intrinsic::arm_neon_vld3: - case Intrinsic::arm_neon_vld4: - case Intrinsic::arm_neon_vst1: - case Intrinsic::arm_neon_vst2: - case Intrinsic::arm_neon_vst3: - case Intrinsic::arm_neon_vst4: - case Intrinsic::arm_neon_vld2lane: - case Intrinsic::arm_neon_vld3lane: - case Intrinsic::arm_neon_vld4lane: - case Intrinsic::aarch64_neon_vld1x2: - case Intrinsic::aarch64_neon_vld1x3: - case Intrinsic::aarch64_neon_vld1x4: - case Intrinsic::aarch64_neon_vst1x2: - case Intrinsic::aarch64_neon_vst1x3: - case Intrinsic::aarch64_neon_vst1x4: - case Intrinsic::arm_neon_vst2lane: - case Intrinsic::arm_neon_vst3lane: - case Intrinsic::arm_neon_vst4lane: - return CombineBaseUpdate(N, DCI); + case Intrinsic::aarch64_neon_ld2: + case Intrinsic::aarch64_neon_ld3: + case Intrinsic::aarch64_neon_ld4: + case Intrinsic::aarch64_neon_ld1x2: + case Intrinsic::aarch64_neon_ld1x3: + case Intrinsic::aarch64_neon_ld1x4: + case Intrinsic::aarch64_neon_ld2lane: + case Intrinsic::aarch64_neon_ld3lane: + case Intrinsic::aarch64_neon_ld4lane: + case Intrinsic::aarch64_neon_ld2r: + case Intrinsic::aarch64_neon_ld3r: + case Intrinsic::aarch64_neon_ld4r: + case Intrinsic::aarch64_neon_st2: + case Intrinsic::aarch64_neon_st3: + case Intrinsic::aarch64_neon_st4: + case Intrinsic::aarch64_neon_st1x2: + case Intrinsic::aarch64_neon_st1x3: + case Intrinsic::aarch64_neon_st1x4: + case Intrinsic::aarch64_neon_st2lane: + case Intrinsic::aarch64_neon_st3lane: + case Intrinsic::aarch64_neon_st4lane: + return performNEONPostLDSTCombine(N, DCI, DAG); default: break; } @@ -4420,979 +7724,214 @@ AArch64TargetLowering::PerformDAGCombine(SDNode *N, return SDValue(); } -bool -AArch64TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const { - VT = VT.getScalarType(); - - if (!VT.isSimple()) - return false; - - switch (VT.getSimpleVT().SimpleTy) { - case MVT::f16: - case MVT::f32: - case MVT::f64: - return true; - case MVT::f128: +// Check if the return value is used as only a return value, as otherwise +// we can't perform a tail-call. In particular, we need to check for +// target ISD nodes that are returns and any other "odd" constructs +// that the generic analysis code won't necessarily catch. +bool AArch64TargetLowering::isUsedByReturnOnly(SDNode *N, + SDValue &Chain) const { + if (N->getNumValues() != 1) return false; - default: - break; - } - - return false; -} -// Check whether a shuffle_vector could be presented as concat_vector. 
-bool AArch64TargetLowering::isConcatVector(SDValue Op, SelectionDAG &DAG, - SDValue V0, SDValue V1, - const int *Mask, - SDValue &Res) const { - SDLoc DL(Op); - EVT VT = Op.getValueType(); - if (VT.getSizeInBits() != 128) + if (!N->hasNUsesOfValue(1, 0)) return false; - if (VT.getVectorElementType() != V0.getValueType().getVectorElementType() || - VT.getVectorElementType() != V1.getValueType().getVectorElementType()) - return false; - - unsigned NumElts = VT.getVectorNumElements(); - bool isContactVector = true; - bool splitV0 = false; - if (V0.getValueType().getSizeInBits() == 128) - splitV0 = true; - - for (int I = 0, E = NumElts / 2; I != E; I++) { - if (Mask[I] != I) { - isContactVector = false; - break; - } - } - - if (isContactVector) { - int offset = NumElts / 2; - for (int I = NumElts / 2, E = NumElts; I != E; I++) { - if (Mask[I] != I + splitV0 * offset) { - isContactVector = false; - break; - } - } - } - - if (isContactVector) { - EVT CastVT = EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(), - NumElts / 2); - if (splitV0) { - V0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, CastVT, V0, - DAG.getConstant(0, MVT::i64)); - } - if (V1.getValueType().getSizeInBits() == 128) { - V1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, CastVT, V1, - DAG.getConstant(0, MVT::i64)); - } - Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, V0, V1); - return true; - } - return false; -} - -// Check whether a Build Vector could be presented as Shuffle Vector. -// This Shuffle Vector maybe not legalized, so the length of its operand and -// the length of result may not equal. -bool AArch64TargetLowering::isKnownShuffleVector(SDValue Op, SelectionDAG &DAG, - SDValue &V0, SDValue &V1, - int *Mask) const { - SDLoc DL(Op); - EVT VT = Op.getValueType(); - unsigned NumElts = VT.getVectorNumElements(); - unsigned V0NumElts = 0; - // Check if all elements are extracted from less than 3 vectors. - for (unsigned i = 0; i < NumElts; ++i) { - SDValue Elt = Op.getOperand(i); - if (Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT || - Elt.getOperand(0).getValueType().getVectorElementType() != - VT.getVectorElementType()) + SDValue TCChain = Chain; + SDNode *Copy = *N->use_begin(); + if (Copy->getOpcode() == ISD::CopyToReg) { + // If the copy has a glue operand, we conservatively assume it isn't safe to + // perform a tail call. + if (Copy->getOperand(Copy->getNumOperands() - 1).getValueType() == + MVT::Glue) return false; + TCChain = Copy->getOperand(0); + } else if (Copy->getOpcode() != ISD::FP_EXTEND) + return false; - if (V0.getNode() == 0) { - V0 = Elt.getOperand(0); - V0NumElts = V0.getValueType().getVectorNumElements(); - } - if (Elt.getOperand(0) == V0) { - Mask[i] = (cast<ConstantSDNode>(Elt->getOperand(1))->getZExtValue()); - continue; - } else if (V1.getNode() == 0) { - V1 = Elt.getOperand(0); - } - if (Elt.getOperand(0) == V1) { - unsigned Lane = cast<ConstantSDNode>(Elt->getOperand(1))->getZExtValue(); - Mask[i] = (Lane + V0NumElts); - continue; - } else { + bool HasRet = false; + for (SDNode *Node : Copy->uses()) { + if (Node->getOpcode() != AArch64ISD::RET_FLAG) return false; - } + HasRet = true; } - return true; -} - -// LowerShiftRightParts - Lower SRL_PARTS and SRA_PARTS, which returns two -/// i64 values and take a 2 x i64 value to shift plus a shift amount. 
-SDValue AArch64TargetLowering::LowerShiftRightParts(SDValue Op, - SelectionDAG &DAG) const { - assert(Op.getNumOperands() == 3 && "Not a quad-shift!"); - EVT VT = Op.getValueType(); - unsigned VTBits = VT.getSizeInBits(); - SDLoc dl(Op); - SDValue ShOpLo = Op.getOperand(0); - SDValue ShOpHi = Op.getOperand(1); - SDValue ShAmt = Op.getOperand(2); - unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL; - - assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS); - SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i64, - DAG.getConstant(VTBits, MVT::i64), ShAmt); - SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt); - SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i64, ShAmt, - DAG.getConstant(VTBits, MVT::i64)); - SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt); - SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); - SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt); - SDValue Tmp3 = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt); - - SDValue A64cc; - SDValue CmpOp = getSelectableIntSetCC(ExtraShAmt, - DAG.getConstant(0, MVT::i64), - ISD::SETGE, A64cc, - DAG, dl); - SDValue Hi = DAG.getNode(AArch64ISD::SELECT_CC, dl, VT, CmpOp, - DAG.getConstant(0, Tmp3.getValueType()), Tmp3, - A64cc); - SDValue Lo = DAG.getNode(AArch64ISD::SELECT_CC, dl, VT, CmpOp, - TrueVal, FalseVal, A64cc); + if (!HasRet) + return false; - SDValue Ops[2] = { Lo, Hi }; - return DAG.getMergeValues(Ops, 2, dl); + Chain = TCChain; + return true; } -/// LowerShiftLeftParts - Lower SHL_PARTS, which returns two -/// i64 values and take a 2 x i64 value to shift plus a shift amount. -SDValue AArch64TargetLowering::LowerShiftLeftParts(SDValue Op, - SelectionDAG &DAG) const { - assert(Op.getNumOperands() == 3 && "Not a quad-shift!"); - EVT VT = Op.getValueType(); - unsigned VTBits = VT.getSizeInBits(); - SDLoc dl(Op); - SDValue ShOpLo = Op.getOperand(0); - SDValue ShOpHi = Op.getOperand(1); - SDValue ShAmt = Op.getOperand(2); - - assert(Op.getOpcode() == ISD::SHL_PARTS); - SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i64, - DAG.getConstant(VTBits, MVT::i64), ShAmt); - SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt); - SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i64, ShAmt, - DAG.getConstant(VTBits, MVT::i64)); - SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt); - SDValue Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt); - SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); - SDValue Tmp4 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt); - - SDValue A64cc; - SDValue CmpOp = getSelectableIntSetCC(ExtraShAmt, - DAG.getConstant(0, MVT::i64), - ISD::SETGE, A64cc, - DAG, dl); - - SDValue Lo = DAG.getNode(AArch64ISD::SELECT_CC, dl, VT, CmpOp, - DAG.getConstant(0, Tmp4.getValueType()), Tmp4, - A64cc); - SDValue Hi = DAG.getNode(AArch64ISD::SELECT_CC, dl, VT, CmpOp, - Tmp3, FalseVal, A64cc); +// Return whether the an instruction can potentially be optimized to a tail +// call. This will cause the optimizers to attempt to move, or duplicate, +// return instructions to help enable tail call optimizations for this +// instruction. +bool AArch64TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const { + if (!CI->isTailCall()) + return false; - SDValue Ops[2] = { Lo, Hi }; - return DAG.getMergeValues(Ops, 2, dl); + return true; } -// If this is a case we can't handle, return null and let the default -// expansion code take care of it. 
-SDValue -AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, - const AArch64Subtarget *ST) const { - - BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode()); - SDLoc DL(Op); - EVT VT = Op.getValueType(); - - APInt SplatBits, SplatUndef; - unsigned SplatBitSize; - bool HasAnyUndefs; - - unsigned UseNeonMov = VT.getSizeInBits() >= 64; - - // Note we favor lowering MOVI over MVNI. - // This has implications on the definition of patterns in TableGen to select - // BIC immediate instructions but not ORR immediate instructions. - // If this lowering order is changed, TableGen patterns for BIC immediate and - // ORR immediate instructions have to be updated. - if (UseNeonMov && - BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { - if (SplatBitSize <= 64) { - // First attempt to use vector immediate-form MOVI - EVT NeonMovVT; - unsigned Imm = 0; - unsigned OpCmode = 0; - - if (isNeonModifiedImm(SplatBits.getZExtValue(), SplatUndef.getZExtValue(), - SplatBitSize, DAG, VT.is128BitVector(), - Neon_Mov_Imm, NeonMovVT, Imm, OpCmode)) { - SDValue ImmVal = DAG.getTargetConstant(Imm, MVT::i32); - SDValue OpCmodeVal = DAG.getConstant(OpCmode, MVT::i32); - - if (ImmVal.getNode() && OpCmodeVal.getNode()) { - SDValue NeonMov = DAG.getNode(AArch64ISD::NEON_MOVIMM, DL, NeonMovVT, - ImmVal, OpCmodeVal); - return DAG.getNode(ISD::BITCAST, DL, VT, NeonMov); - } - } - - // Then attempt to use vector immediate-form MVNI - uint64_t NegatedImm = (~SplatBits).getZExtValue(); - if (isNeonModifiedImm(NegatedImm, SplatUndef.getZExtValue(), SplatBitSize, - DAG, VT.is128BitVector(), Neon_Mvn_Imm, NeonMovVT, - Imm, OpCmode)) { - SDValue ImmVal = DAG.getTargetConstant(Imm, MVT::i32); - SDValue OpCmodeVal = DAG.getConstant(OpCmode, MVT::i32); - if (ImmVal.getNode() && OpCmodeVal.getNode()) { - SDValue NeonMov = DAG.getNode(AArch64ISD::NEON_MVNIMM, DL, NeonMovVT, - ImmVal, OpCmodeVal); - return DAG.getNode(ISD::BITCAST, DL, VT, NeonMov); - } - } - - // Attempt to use vector immediate-form FMOV - if (((VT == MVT::v2f32 || VT == MVT::v4f32) && SplatBitSize == 32) || - (VT == MVT::v2f64 && SplatBitSize == 64)) { - APFloat RealVal( - SplatBitSize == 32 ? APFloat::IEEEsingle : APFloat::IEEEdouble, - SplatBits); - uint32_t ImmVal; - if (A64Imms::isFPImm(RealVal, ImmVal)) { - SDValue Val = DAG.getTargetConstant(ImmVal, MVT::i32); - return DAG.getNode(AArch64ISD::NEON_FMOVIMM, DL, VT, Val); - } - } - } - } - - unsigned NumElts = VT.getVectorNumElements(); - bool isOnlyLowElement = true; - bool usesOnlyOneValue = true; - bool hasDominantValue = false; - bool isConstant = true; - - // Map of the number of times a particular SDValue appears in the - // element list. - DenseMap<SDValue, unsigned> ValueCounts; - SDValue Value; - for (unsigned i = 0; i < NumElts; ++i) { - SDValue V = Op.getOperand(i); - if (V.getOpcode() == ISD::UNDEF) - continue; - if (i > 0) - isOnlyLowElement = false; - if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V)) - isConstant = false; - - ValueCounts.insert(std::make_pair(V, 0)); - unsigned &Count = ValueCounts[V]; - - // Is this value dominant? 
(takes up more than half of the lanes) - if (++Count > (NumElts / 2)) { - hasDominantValue = true; - Value = V; - } - } - if (ValueCounts.size() != 1) - usesOnlyOneValue = false; - if (!Value.getNode() && ValueCounts.size() > 0) - Value = ValueCounts.begin()->first; - - if (ValueCounts.size() == 0) - return DAG.getUNDEF(VT); - - if (isOnlyLowElement) - return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Value); - - unsigned EltSize = VT.getVectorElementType().getSizeInBits(); - if (hasDominantValue && EltSize <= 64) { - // Use VDUP for non-constant splats. - if (!isConstant) { - SDValue N; - - // If we are DUPing a value that comes directly from a vector, we could - // just use DUPLANE. We can only do this if the lane being extracted - // is at a constant index, as the DUP from lane instructions only have - // constant-index forms. - // - // If there is a TRUNCATE between EXTRACT_VECTOR_ELT and DUP, we can - // remove TRUNCATE for DUPLANE by apdating the source vector to - // appropriate vector type and lane index. - // - // FIXME: for now we have v1i8, v1i16, v1i32 legal vector types, if they - // are not legal any more, no need to check the type size in bits should - // be large than 64. - SDValue V = Value; - if (Value->getOpcode() == ISD::TRUNCATE) - V = Value->getOperand(0); - if (V->getOpcode() == ISD::EXTRACT_VECTOR_ELT && - isa<ConstantSDNode>(V->getOperand(1)) && - V->getOperand(0).getValueType().getSizeInBits() >= 64) { - - // If the element size of source vector is larger than DUPLANE - // element size, we can do transformation by, - // 1) bitcasting source register to smaller element vector - // 2) mutiplying the lane index by SrcEltSize/ResEltSize - // For example, we can lower - // "v8i16 vdup_lane(v4i32, 1)" - // to be - // "v8i16 vdup_lane(v8i16 bitcast(v4i32), 2)". - SDValue SrcVec = V->getOperand(0); - unsigned SrcEltSize = - SrcVec.getValueType().getVectorElementType().getSizeInBits(); - unsigned ResEltSize = VT.getVectorElementType().getSizeInBits(); - if (SrcEltSize > ResEltSize) { - assert((SrcEltSize % ResEltSize == 0) && "Invalid element size"); - SDValue BitCast; - unsigned SrcSize = SrcVec.getValueType().getSizeInBits(); - unsigned ResSize = VT.getSizeInBits(); - - if (SrcSize > ResSize) { - assert((SrcSize % ResSize == 0) && "Invalid vector size"); - EVT CastVT = - EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(), - SrcSize / ResEltSize); - BitCast = DAG.getNode(ISD::BITCAST, DL, CastVT, SrcVec); - } else { - assert((SrcSize == ResSize) && "Invalid vector size of source vec"); - BitCast = DAG.getNode(ISD::BITCAST, DL, VT, SrcVec); - } - - unsigned LaneIdx = V->getConstantOperandVal(1); - SDValue Lane = - DAG.getConstant((SrcEltSize / ResEltSize) * LaneIdx, MVT::i64); - N = DAG.getNode(AArch64ISD::NEON_VDUPLANE, DL, VT, BitCast, Lane); - } else { - assert((SrcEltSize == ResEltSize) && - "Invalid element size of source vec"); - N = DAG.getNode(AArch64ISD::NEON_VDUPLANE, DL, VT, V->getOperand(0), - V->getOperand(1)); - } - } else - N = DAG.getNode(AArch64ISD::NEON_VDUP, DL, VT, Value); - - if (!usesOnlyOneValue) { - // The dominant value was splatted as 'N', but we now have to insert - // all differing elements. 
- for (unsigned I = 0; I < NumElts; ++I) { - if (Op.getOperand(I) == Value) - continue; - SmallVector<SDValue, 3> Ops; - Ops.push_back(N); - Ops.push_back(Op.getOperand(I)); - Ops.push_back(DAG.getConstant(I, MVT::i64)); - N = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, &Ops[0], 3); - } - } - return N; - } - if (usesOnlyOneValue && isConstant) { - return DAG.getNode(AArch64ISD::NEON_VDUP, DL, VT, Value); - } - } - // If all elements are constants and the case above didn't get hit, fall back - // to the default expansion, which will generate a load from the constant - // pool. - if (isConstant) - return SDValue(); - - // Try to lower this in lowering ShuffleVector way. - SDValue V0, V1; - int Mask[16]; - if (isKnownShuffleVector(Op, DAG, V0, V1, Mask)) { - unsigned V0NumElts = V0.getValueType().getVectorNumElements(); - if (!V1.getNode() && V0NumElts == NumElts * 2) { - V1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V0, - DAG.getConstant(NumElts, MVT::i64)); - V0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V0, - DAG.getConstant(0, MVT::i64)); - V0NumElts = V0.getValueType().getVectorNumElements(); - } - - if (V1.getNode() && NumElts == V0NumElts && - V0NumElts == V1.getValueType().getVectorNumElements()) { - SDValue Shuffle = DAG.getVectorShuffle(VT, DL, V0, V1, Mask); - if (Shuffle.getOpcode() != ISD::VECTOR_SHUFFLE) - return Shuffle; - else - return LowerVECTOR_SHUFFLE(Shuffle, DAG); - } else { - SDValue Res; - if (isConcatVector(Op, DAG, V0, V1, Mask, Res)) - return Res; - } - } +bool AArch64TargetLowering::getIndexedAddressParts(SDNode *Op, SDValue &Base, + SDValue &Offset, + ISD::MemIndexedMode &AM, + bool &IsInc, + SelectionDAG &DAG) const { + if (Op->getOpcode() != ISD::ADD && Op->getOpcode() != ISD::SUB) + return false; - // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we - // know the default expansion would otherwise fall back on something even - // worse. For a vector with one or two non-undef values, that's - // scalar_to_vector for the elements followed by a shuffle (provided the - // shuffle is valid for the target) and materialization element by element - // on the stack followed by a load for everything else. - if (!isConstant && !usesOnlyOneValue) { - SDValue Vec = DAG.getUNDEF(VT); - for (unsigned i = 0 ; i < NumElts; ++i) { - SDValue V = Op.getOperand(i); - if (V.getOpcode() == ISD::UNDEF) - continue; - SDValue LaneIdx = DAG.getConstant(i, MVT::i64); - Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Vec, V, LaneIdx); - } - return Vec; + Base = Op->getOperand(0); + // All of the indexed addressing mode instructions take a signed + // 9 bit immediate offset. + if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Op->getOperand(1))) { + int64_t RHSC = (int64_t)RHS->getZExtValue(); + if (RHSC >= 256 || RHSC <= -256) + return false; + IsInc = (Op->getOpcode() == ISD::ADD); + Offset = Op->getOperand(1); + return true; } - return SDValue(); + return false; } -/// isREVMask - Check if a vector shuffle corresponds to a REV -/// instruction with the specified blocksize. (The order of the elements -/// within each block of the vector is reversed.) 
-static bool isREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) { - assert((BlockSize == 16 || BlockSize == 32 || BlockSize == 64) && - "Only possible block sizes for REV are: 16, 32, 64"); - - unsigned EltSz = VT.getVectorElementType().getSizeInBits(); - if (EltSz == 64) +bool AArch64TargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, + SDValue &Offset, + ISD::MemIndexedMode &AM, + SelectionDAG &DAG) const { + EVT VT; + SDValue Ptr; + if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { + VT = LD->getMemoryVT(); + Ptr = LD->getBasePtr(); + } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { + VT = ST->getMemoryVT(); + Ptr = ST->getBasePtr(); + } else return false; - unsigned NumElts = VT.getVectorNumElements(); - unsigned BlockElts = M[0] + 1; - // If the first shuffle index is UNDEF, be optimistic. - if (M[0] < 0) - BlockElts = BlockSize / EltSz; - - if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz) + bool IsInc; + if (!getIndexedAddressParts(Ptr.getNode(), Base, Offset, AM, IsInc, DAG)) return false; - - for (unsigned i = 0; i < NumElts; ++i) { - if (M[i] < 0) - continue; // ignore UNDEF indices - if ((unsigned)M[i] != (i - i % BlockElts) + (BlockElts - 1 - i % BlockElts)) - return false; - } - + AM = IsInc ? ISD::PRE_INC : ISD::PRE_DEC; return true; } -// isPermuteMask - Check whether the vector shuffle matches to UZP, ZIP and -// TRN instruction. -static unsigned isPermuteMask(ArrayRef<int> M, EVT VT, bool isV2undef) { - unsigned NumElts = VT.getVectorNumElements(); - if (NumElts < 4) - return 0; - - bool ismatch = true; - - // Check UZP1 - for (unsigned i = 0; i < NumElts; ++i) { - unsigned answer = i * 2; - if (isV2undef && answer >= NumElts) - answer -= NumElts; - if (M[i] != -1 && (unsigned)M[i] != answer) { - ismatch = false; - break; - } - } - if (ismatch) - return AArch64ISD::NEON_UZP1; - - // Check UZP2 - ismatch = true; - for (unsigned i = 0; i < NumElts; ++i) { - unsigned answer = i * 2 + 1; - if (isV2undef && answer >= NumElts) - answer -= NumElts; - if (M[i] != -1 && (unsigned)M[i] != answer) { - ismatch = false; - break; - } - } - if (ismatch) - return AArch64ISD::NEON_UZP2; - - // Check ZIP1 - ismatch = true; - for (unsigned i = 0; i < NumElts; ++i) { - unsigned answer = i / 2 + NumElts * (i % 2); - if (isV2undef && answer >= NumElts) - answer -= NumElts; - if (M[i] != -1 && (unsigned)M[i] != answer) { - ismatch = false; - break; - } - } - if (ismatch) - return AArch64ISD::NEON_ZIP1; - - // Check ZIP2 - ismatch = true; - for (unsigned i = 0; i < NumElts; ++i) { - unsigned answer = (NumElts + i) / 2 + NumElts * (i % 2); - if (isV2undef && answer >= NumElts) - answer -= NumElts; - if (M[i] != -1 && (unsigned)M[i] != answer) { - ismatch = false; - break; - } - } - if (ismatch) - return AArch64ISD::NEON_ZIP2; - - // Check TRN1 - ismatch = true; - for (unsigned i = 0; i < NumElts; ++i) { - unsigned answer = i + (NumElts - 1) * (i % 2); - if (isV2undef && answer >= NumElts) - answer -= NumElts; - if (M[i] != -1 && (unsigned)M[i] != answer) { - ismatch = false; - break; - } - } - if (ismatch) - return AArch64ISD::NEON_TRN1; - - // Check TRN2 - ismatch = true; - for (unsigned i = 0; i < NumElts; ++i) { - unsigned answer = 1 + i + (NumElts - 1) * (i % 2); - if (isV2undef && answer >= NumElts) - answer -= NumElts; - if (M[i] != -1 && (unsigned)M[i] != answer) { - ismatch = false; - break; - } - } - if (ismatch) - return AArch64ISD::NEON_TRN2; - - return 0; -} - -SDValue -AArch64TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, - SelectionDAG &DAG) const { 
- SDValue V1 = Op.getOperand(0); - SDValue V2 = Op.getOperand(1); - SDLoc dl(Op); - EVT VT = Op.getValueType(); - ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode()); - - // Convert shuffles that are directly supported on NEON to target-specific - // DAG nodes, instead of keeping them as shuffles and matching them again - // during code selection. This is more efficient and avoids the possibility - // of inconsistencies between legalization and selection. - ArrayRef<int> ShuffleMask = SVN->getMask(); - - unsigned EltSize = VT.getVectorElementType().getSizeInBits(); - if (EltSize > 64) - return SDValue(); - - if (isREVMask(ShuffleMask, VT, 64)) - return DAG.getNode(AArch64ISD::NEON_REV64, dl, VT, V1); - if (isREVMask(ShuffleMask, VT, 32)) - return DAG.getNode(AArch64ISD::NEON_REV32, dl, VT, V1); - if (isREVMask(ShuffleMask, VT, 16)) - return DAG.getNode(AArch64ISD::NEON_REV16, dl, VT, V1); - - unsigned ISDNo; - if (V2.getOpcode() == ISD::UNDEF) - ISDNo = isPermuteMask(ShuffleMask, VT, true); - else - ISDNo = isPermuteMask(ShuffleMask, VT, false); - - if (ISDNo) { - if (V2.getOpcode() == ISD::UNDEF) - return DAG.getNode(ISDNo, dl, VT, V1, V1); - else - return DAG.getNode(ISDNo, dl, VT, V1, V2); - } - - SDValue Res; - if (isConcatVector(Op, DAG, V1, V2, &ShuffleMask[0], Res)) - return Res; - - // If the element of shuffle mask are all the same constant, we can - // transform it into either NEON_VDUP or NEON_VDUPLANE - if (ShuffleVectorSDNode::isSplatMask(&ShuffleMask[0], VT)) { - int Lane = SVN->getSplatIndex(); - // If this is undef splat, generate it via "just" vdup, if possible. - if (Lane == -1) Lane = 0; - - // Test if V1 is a SCALAR_TO_VECTOR. - if (V1.getOpcode() == ISD::SCALAR_TO_VECTOR) { - return DAG.getNode(AArch64ISD::NEON_VDUP, dl, VT, V1.getOperand(0)); - } - // Test if V1 is a BUILD_VECTOR which is equivalent to a SCALAR_TO_VECTOR. - if (V1.getOpcode() == ISD::BUILD_VECTOR) { - bool IsScalarToVector = true; - for (unsigned i = 0, e = V1.getNumOperands(); i != e; ++i) - if (V1.getOperand(i).getOpcode() != ISD::UNDEF && - i != (unsigned)Lane) { - IsScalarToVector = false; - break; - } - if (IsScalarToVector) - return DAG.getNode(AArch64ISD::NEON_VDUP, dl, VT, - V1.getOperand(Lane)); - } - - // Test if V1 is a EXTRACT_SUBVECTOR. - if (V1.getOpcode() == ISD::EXTRACT_SUBVECTOR) { - int ExtLane = cast<ConstantSDNode>(V1.getOperand(1))->getZExtValue(); - return DAG.getNode(AArch64ISD::NEON_VDUPLANE, dl, VT, V1.getOperand(0), - DAG.getConstant(Lane + ExtLane, MVT::i64)); - } - // Test if V1 is a CONCAT_VECTORS. - if (V1.getOpcode() == ISD::CONCAT_VECTORS && - V1.getOperand(1).getOpcode() == ISD::UNDEF) { - SDValue Op0 = V1.getOperand(0); - assert((unsigned)Lane < Op0.getValueType().getVectorNumElements() && - "Invalid vector lane access"); - return DAG.getNode(AArch64ISD::NEON_VDUPLANE, dl, VT, Op0, - DAG.getConstant(Lane, MVT::i64)); - } - - return DAG.getNode(AArch64ISD::NEON_VDUPLANE, dl, VT, V1, - DAG.getConstant(Lane, MVT::i64)); - } - - int Length = ShuffleMask.size(); - int V1EltNum = V1.getValueType().getVectorNumElements(); - - // If the number of v1 elements is the same as the number of shuffle mask - // element and the shuffle masks are sequential values, we can transform - // it into NEON_VEXTRACT. - if (V1EltNum == Length) { - // Check if the shuffle mask is sequential. 
- int SkipUndef = 0; - while (ShuffleMask[SkipUndef] == -1) { - SkipUndef++; - } - int CurMask = ShuffleMask[SkipUndef]; - if (CurMask >= SkipUndef) { - bool IsSequential = true; - for (int I = SkipUndef; I < Length; ++I) { - if (ShuffleMask[I] != -1 && ShuffleMask[I] != CurMask) { - IsSequential = false; - break; - } - CurMask++; - } - if (IsSequential) { - assert((EltSize % 8 == 0) && "Bitsize of vector element is incorrect"); - unsigned VecSize = EltSize * V1EltNum; - unsigned Index = (EltSize / 8) * (ShuffleMask[SkipUndef] - SkipUndef); - if (VecSize == 64 || VecSize == 128) - return DAG.getNode(AArch64ISD::NEON_VEXTRACT, dl, VT, V1, V2, - DAG.getConstant(Index, MVT::i64)); - } - } - } - - // For shuffle mask like "0, 1, 2, 3, 4, 5, 13, 7", try to generate insert - // by element from V2 to V1 . - // If shuffle mask is like "0, 1, 10, 11, 12, 13, 14, 15", V2 would be a - // better choice to be inserted than V1 as less insert needed, so we count - // element to be inserted for both V1 and V2, and select less one as insert - // target. - - // Collect elements need to be inserted and their index. - SmallVector<int, 8> NV1Elt; - SmallVector<int, 8> N1Index; - SmallVector<int, 8> NV2Elt; - SmallVector<int, 8> N2Index; - for (int I = 0; I != Length; ++I) { - if (ShuffleMask[I] != I) { - NV1Elt.push_back(ShuffleMask[I]); - N1Index.push_back(I); - } - } - for (int I = 0; I != Length; ++I) { - if (ShuffleMask[I] != (I + V1EltNum)) { - NV2Elt.push_back(ShuffleMask[I]); - N2Index.push_back(I); - } - } - - // Decide which to be inserted. If all lanes mismatch, neither V1 nor V2 - // will be inserted. - SDValue InsV = V1; - SmallVector<int, 8> InsMasks = NV1Elt; - SmallVector<int, 8> InsIndex = N1Index; - if ((int)NV1Elt.size() != Length || (int)NV2Elt.size() != Length) { - if (NV1Elt.size() > NV2Elt.size()) { - InsV = V2; - InsMasks = NV2Elt; - InsIndex = N2Index; - } - } else { - InsV = DAG.getNode(ISD::UNDEF, dl, VT); - } - - for (int I = 0, E = InsMasks.size(); I != E; ++I) { - SDValue ExtV = V1; - int Mask = InsMasks[I]; - if (Mask >= V1EltNum) { - ExtV = V2; - Mask -= V1EltNum; - } - // Any value type smaller than i32 is illegal in AArch64, and this lower - // function is called after legalize pass, so we need to legalize - // the result here. - EVT EltVT; - if (VT.getVectorElementType().isFloatingPoint()) - EltVT = (EltSize == 64) ? MVT::f64 : MVT::f32; - else - EltVT = (EltSize == 64) ? MVT::i64 : MVT::i32; +bool AArch64TargetLowering::getPostIndexedAddressParts( + SDNode *N, SDNode *Op, SDValue &Base, SDValue &Offset, + ISD::MemIndexedMode &AM, SelectionDAG &DAG) const { + EVT VT; + SDValue Ptr; + if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { + VT = LD->getMemoryVT(); + Ptr = LD->getBasePtr(); + } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { + VT = ST->getMemoryVT(); + Ptr = ST->getBasePtr(); + } else + return false; - if (Mask >= 0) { - ExtV = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, ExtV, - DAG.getConstant(Mask, MVT::i64)); - InsV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, InsV, ExtV, - DAG.getConstant(InsIndex[I], MVT::i64)); - } - } - return InsV; + bool IsInc; + if (!getIndexedAddressParts(Op, Base, Offset, AM, IsInc, DAG)) + return false; + // Post-indexing updates the base, so it's not a valid transform + // if that's not the same as the load's pointer. + if (Ptr != Base) + return false; + AM = IsInc ? 
ISD::POST_INC : ISD::POST_DEC; + return true; } -AArch64TargetLowering::ConstraintType -AArch64TargetLowering::getConstraintType(const std::string &Constraint) const { - if (Constraint.size() == 1) { - switch (Constraint[0]) { - default: break; - case 'w': // An FP/SIMD vector register - return C_RegisterClass; - case 'I': // Constant that can be used with an ADD instruction - case 'J': // Constant that can be used with a SUB instruction - case 'K': // Constant that can be used with a 32-bit logical instruction - case 'L': // Constant that can be used with a 64-bit logical instruction - case 'M': // Constant that can be used as a 32-bit MOV immediate - case 'N': // Constant that can be used as a 64-bit MOV immediate - case 'Y': // Floating point constant zero - case 'Z': // Integer constant zero - return C_Other; - case 'Q': // A memory reference with base register and no offset - return C_Memory; - case 'S': // A symbolic address - return C_Other; - } +void AArch64TargetLowering::ReplaceNodeResults( + SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const { + switch (N->getOpcode()) { + default: + llvm_unreachable("Don't know how to custom expand this"); + case ISD::FP_TO_UINT: + case ISD::FP_TO_SINT: + assert(N->getValueType(0) == MVT::i128 && "unexpected illegal conversion"); + // Let normal code take care of it by not adding anything to Results. + return; } - - // FIXME: Ump, Utf, Usa, Ush - // Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes, - // whatever they may be - // Utf: A memory address suitable for ldp/stp in TF mode, whatever it may be - // Usa: An absolute symbolic address - // Ush: The high part (bits 32:12) of a pc-relative symbolic address - assert(Constraint != "Ump" && Constraint != "Utf" && Constraint != "Usa" - && Constraint != "Ush" && "Unimplemented constraints"); - - return TargetLowering::getConstraintType(Constraint); } -TargetLowering::ConstraintWeight -AArch64TargetLowering::getSingleConstraintMatchWeight(AsmOperandInfo &Info, - const char *Constraint) const { - - llvm_unreachable("Constraint weight unimplemented"); +bool AArch64TargetLowering::shouldExpandAtomicInIR(Instruction *Inst) const { + // Loads and stores less than 128-bits are already atomic; ones above that + // are doomed anyway, so defer to the default libcall and blame the OS when + // things go wrong: + if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) + return SI->getValueOperand()->getType()->getPrimitiveSizeInBits() == 128; + else if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) + return LI->getType()->getPrimitiveSizeInBits() == 128; + + // For the real atomic operations, we have ldxr/stxr up to 128 bits. + return Inst->getType()->getPrimitiveSizeInBits() <= 128; } -void -AArch64TargetLowering::LowerAsmOperandForConstraint(SDValue Op, - std::string &Constraint, - std::vector<SDValue> &Ops, - SelectionDAG &DAG) const { - SDValue Result(0, 0); - - // Only length 1 constraints are C_Other. - if (Constraint.size() != 1) return; - - // Only C_Other constraints get lowered like this. That means constants for us - // so return early if there's no hope the constraint can be lowered. - - switch(Constraint[0]) { - default: break; - case 'I': case 'J': case 'K': case 'L': - case 'M': case 'N': case 'Z': { - ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); - if (!C) - return; - - uint64_t CVal = C->getZExtValue(); - uint32_t Bits; - - switch (Constraint[0]) { - default: - // FIXME: 'M' and 'N' are MOV pseudo-insts -- unsupported in assembly. 
'J' - // is a peculiarly useless SUB constraint. - llvm_unreachable("Unimplemented C_Other constraint"); - case 'I': - if (CVal <= 0xfff) - break; - return; - case 'K': - if (A64Imms::isLogicalImm(32, CVal, Bits)) - break; - return; - case 'L': - if (A64Imms::isLogicalImm(64, CVal, Bits)) - break; - return; - case 'Z': - if (CVal == 0) - break; - return; - } - - Result = DAG.getTargetConstant(CVal, Op.getValueType()); - break; - } - case 'S': { - // An absolute symbolic address or label reference. - if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op)) { - Result = DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op), - GA->getValueType(0)); - } else if (const BlockAddressSDNode *BA - = dyn_cast<BlockAddressSDNode>(Op)) { - Result = DAG.getTargetBlockAddress(BA->getBlockAddress(), - BA->getValueType(0)); - } else if (const ExternalSymbolSDNode *ES - = dyn_cast<ExternalSymbolSDNode>(Op)) { - Result = DAG.getTargetExternalSymbol(ES->getSymbol(), - ES->getValueType(0)); - } else - return; - break; - } - case 'Y': - if (const ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) { - if (CFP->isExactlyValue(0.0)) { - Result = DAG.getTargetConstantFP(0.0, CFP->getValueType(0)); - break; - } - } - return; +Value *AArch64TargetLowering::emitLoadLinked(IRBuilder<> &Builder, Value *Addr, + AtomicOrdering Ord) const { + Module *M = Builder.GetInsertBlock()->getParent()->getParent(); + Type *ValTy = cast<PointerType>(Addr->getType())->getElementType(); + bool IsAcquire = + Ord == Acquire || Ord == AcquireRelease || Ord == SequentiallyConsistent; + + // Since i128 isn't legal and intrinsics don't get type-lowered, the ldrexd + // intrinsic must return {i64, i64} and we have to recombine them into a + // single i128 here. + if (ValTy->getPrimitiveSizeInBits() == 128) { + Intrinsic::ID Int = + IsAcquire ? Intrinsic::aarch64_ldaxp : Intrinsic::aarch64_ldxp; + Function *Ldxr = llvm::Intrinsic::getDeclaration(M, Int); + + Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext())); + Value *LoHi = Builder.CreateCall(Ldxr, Addr, "lohi"); + + Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo"); + Value *Hi = Builder.CreateExtractValue(LoHi, 1, "hi"); + Lo = Builder.CreateZExt(Lo, ValTy, "lo64"); + Hi = Builder.CreateZExt(Hi, ValTy, "hi64"); + return Builder.CreateOr( + Lo, Builder.CreateShl(Hi, ConstantInt::get(ValTy, 64)), "val64"); } - if (Result.getNode()) { - Ops.push_back(Result); - return; - } + Type *Tys[] = { Addr->getType() }; + Intrinsic::ID Int = + IsAcquire ? Intrinsic::aarch64_ldaxr : Intrinsic::aarch64_ldxr; + Function *Ldxr = llvm::Intrinsic::getDeclaration(M, Int, Tys); - // It's an unknown constraint for us. Let generic code have a go. 
- TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); + return Builder.CreateTruncOrBitCast( + Builder.CreateCall(Ldxr, Addr), + cast<PointerType>(Addr->getType())->getElementType()); } -std::pair<unsigned, const TargetRegisterClass*> -AArch64TargetLowering::getRegForInlineAsmConstraint( - const std::string &Constraint, - MVT VT) const { - if (Constraint.size() == 1) { - switch (Constraint[0]) { - case 'r': - if (VT.getSizeInBits() <= 32) - return std::make_pair(0U, &AArch64::GPR32RegClass); - else if (VT == MVT::i64) - return std::make_pair(0U, &AArch64::GPR64RegClass); - break; - case 'w': - if (VT == MVT::f16) - return std::make_pair(0U, &AArch64::FPR16RegClass); - else if (VT == MVT::f32) - return std::make_pair(0U, &AArch64::FPR32RegClass); - else if (VT.getSizeInBits() == 64) - return std::make_pair(0U, &AArch64::FPR64RegClass); - else if (VT.getSizeInBits() == 128) - return std::make_pair(0U, &AArch64::FPR128RegClass); - break; - } +Value *AArch64TargetLowering::emitStoreConditional(IRBuilder<> &Builder, + Value *Val, Value *Addr, + AtomicOrdering Ord) const { + Module *M = Builder.GetInsertBlock()->getParent()->getParent(); + bool IsRelease = + Ord == Release || Ord == AcquireRelease || Ord == SequentiallyConsistent; + + // Since the intrinsics must have legal type, the i128 intrinsics take two + // parameters: "i64, i64". We must marshal Val into the appropriate form + // before the call. + if (Val->getType()->getPrimitiveSizeInBits() == 128) { + Intrinsic::ID Int = + IsRelease ? Intrinsic::aarch64_stlxp : Intrinsic::aarch64_stxp; + Function *Stxr = Intrinsic::getDeclaration(M, Int); + Type *Int64Ty = Type::getInt64Ty(M->getContext()); + + Value *Lo = Builder.CreateTrunc(Val, Int64Ty, "lo"); + Value *Hi = Builder.CreateTrunc(Builder.CreateLShr(Val, 64), Int64Ty, "hi"); + Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext())); + return Builder.CreateCall3(Stxr, Lo, Hi, Addr); } - // Use the default implementation in TargetLowering to convert the register - // constraint into a member of a register class. - return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); -} + Intrinsic::ID Int = + IsRelease ? Intrinsic::aarch64_stlxr : Intrinsic::aarch64_stxr; + Type *Tys[] = { Addr->getType() }; + Function *Stxr = Intrinsic::getDeclaration(M, Int, Tys); -/// Represent NEON load and store intrinsics as MemIntrinsicNodes. -/// The associated MachineMemOperands record the alignment specified -/// in the intrinsic calls. -bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, - const CallInst &I, - unsigned Intrinsic) const { - switch (Intrinsic) { - case Intrinsic::arm_neon_vld1: - case Intrinsic::arm_neon_vld2: - case Intrinsic::arm_neon_vld3: - case Intrinsic::arm_neon_vld4: - case Intrinsic::aarch64_neon_vld1x2: - case Intrinsic::aarch64_neon_vld1x3: - case Intrinsic::aarch64_neon_vld1x4: - case Intrinsic::arm_neon_vld2lane: - case Intrinsic::arm_neon_vld3lane: - case Intrinsic::arm_neon_vld4lane: { - Info.opc = ISD::INTRINSIC_W_CHAIN; - // Conservatively set memVT to the entire set of vectors loaded. 
- uint64_t NumElts = getDataLayout()->getTypeAllocSize(I.getType()) / 8; - Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); - Info.ptrVal = I.getArgOperand(0); - Info.offset = 0; - Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1); - Info.align = cast<ConstantInt>(AlignArg)->getZExtValue(); - Info.vol = false; // volatile loads with NEON intrinsics not supported - Info.readMem = true; - Info.writeMem = false; - return true; - } - case Intrinsic::arm_neon_vst1: - case Intrinsic::arm_neon_vst2: - case Intrinsic::arm_neon_vst3: - case Intrinsic::arm_neon_vst4: - case Intrinsic::aarch64_neon_vst1x2: - case Intrinsic::aarch64_neon_vst1x3: - case Intrinsic::aarch64_neon_vst1x4: - case Intrinsic::arm_neon_vst2lane: - case Intrinsic::arm_neon_vst3lane: - case Intrinsic::arm_neon_vst4lane: { - Info.opc = ISD::INTRINSIC_VOID; - // Conservatively set memVT to the entire set of vectors stored. - unsigned NumElts = 0; - for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) { - Type *ArgTy = I.getArgOperand(ArgI)->getType(); - if (!ArgTy->isVectorTy()) - break; - NumElts += getDataLayout()->getTypeAllocSize(ArgTy) / 8; - } - Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); - Info.ptrVal = I.getArgOperand(0); - Info.offset = 0; - Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1); - Info.align = cast<ConstantInt>(AlignArg)->getZExtValue(); - Info.vol = false; // volatile stores with NEON intrinsics not supported - Info.readMem = false; - Info.writeMem = true; - return true; - } - default: - break; - } - - return false; + return Builder.CreateCall2( + Stxr, Builder.CreateZExtOrBitCast( + Val, Stxr->getFunctionType()->getParamType(0)), + Addr); } |
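Note on the 128-bit atomic paths above: emitLoadLinked rebuilds an i128 value from the {i64, i64} result of the ldaxp/ldxp intrinsics, and emitStoreConditional splits an i128 back into the two i64 operands of stlxp/stxp. A minimal standalone C++ sketch of that lo/hi marshaling follows; it is illustrative only, not LLVM API code, and the use of unsigned __int128 plus the helper names are assumptions made for the example.

#include <cstdint>

// Sketch of the value marshaling done around the paired exclusive
// load/store intrinsics: an i128 is carried as a {lo: i64, hi: i64} pair.
struct PairI64 { uint64_t Lo, Hi; };

// Mirrors emitLoadLinked: val64 = zext(lo) | (zext(hi) << 64).
static unsigned __int128 recombine(PairI64 P) {
  return (static_cast<unsigned __int128>(P.Hi) << 64) |
         static_cast<unsigned __int128>(P.Lo);
}

// Mirrors emitStoreConditional: lo = trunc(V), hi = trunc(lshr(V, 64)).
static PairI64 split(unsigned __int128 V) {
  return { static_cast<uint64_t>(V), static_cast<uint64_t>(V >> 64) };
}

Splitting the value this way keeps every intrinsic operand and result a legal 64-bit type, which is why shouldExpandAtomicInIR can still report an ldxr/stxr (or ldxp/stxp) expansion for operations up to 128 bits.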