X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FTarget%2FSystemZ%2FSystemZISelDAGToDAG.cpp;h=7febed25cb84cc09328d35f21365ee76fcd691a7;hb=68ad65c25d66d34098dfa00ae78eaf38b0ab4ecf;hp=a1eb6b379b854c735bee181dd6fa13d9d4b9cf4f;hpb=17aa68055beed6faa48ca3a995c5b6fdf5092fd4;p=oota-llvm.git diff --git a/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp b/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp index a1eb6b379b8..7febed25cb8 100644 --- a/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp +++ b/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp @@ -1,4 +1,4 @@ -//==-- SystemZISelDAGToDAG.cpp - A dag to dag inst selector for SystemZ ---===// +//===-- SystemZISelDAGToDAG.cpp - A dag to dag inst selector for SystemZ --===// // // The LLVM Compiler Infrastructure // @@ -11,763 +11,1124 @@ // //===----------------------------------------------------------------------===// -#include "SystemZ.h" #include "SystemZTargetMachine.h" -#include "llvm/DerivedTypes.h" -#include "llvm/Function.h" -#include "llvm/Intrinsics.h" -#include "llvm/CallingConv.h" -#include "llvm/Constants.h" -#include "llvm/CodeGen/MachineFrameInfo.h" -#include "llvm/CodeGen/MachineFunction.h" -#include "llvm/CodeGen/MachineInstrBuilder.h" -#include "llvm/CodeGen/MachineRegisterInfo.h" -#include "llvm/CodeGen/SelectionDAG.h" +#include "llvm/Analysis/AliasAnalysis.h" #include "llvm/CodeGen/SelectionDAGISel.h" -#include "llvm/Target/TargetLowering.h" -#include "llvm/Support/Compiler.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" + using namespace llvm; namespace { - /// SystemZRRIAddressMode - This corresponds to rriaddr, but uses SDValue's - /// instead of register numbers for the leaves of the matched tree. - struct SystemZRRIAddressMode { - enum { - RegBase, - FrameIndexBase - } BaseType; - - struct { // This is really a union, discriminated by BaseType! 
- SDValue Reg; - int FrameIndex; - } Base; - - SDValue IndexReg; - int64_t Disp; - bool isRI; - - SystemZRRIAddressMode(bool RI = false) - : BaseType(RegBase), IndexReg(), Disp(0), isRI(RI) { - } +// Used to build addressing modes. +struct SystemZAddressingMode { + // The shape of the address. + enum AddrForm { + // base+displacement + FormBD, - void dump() { - errs() << "SystemZRRIAddressMode " << this << '\n'; - if (BaseType == RegBase) { - errs() << "Base.Reg "; - if (Base.Reg.getNode() != 0) - Base.Reg.getNode()->dump(); - else - errs() << "nul"; - errs() << '\n'; - } else { - errs() << " Base.FrameIndex " << Base.FrameIndex << '\n'; - } - if (!isRI) { - errs() << "IndexReg "; - if (IndexReg.getNode() != 0) IndexReg.getNode()->dump(); - else errs() << "nul"; - } - errs() << " Disp " << Disp << '\n'; - } + // base+displacement+index for load and store operands + FormBDXNormal, + + // base+displacement+index for load address operands + FormBDXLA, + + // base+displacement+index+ADJDYNALLOC + FormBDXDynAlloc }; -} + AddrForm Form; + + // The type of displacement. The enum names here correspond directly + // to the definitions in SystemZOperand.td. We could split them into + // flags -- single/pair, 128-bit, etc. -- but it hardly seems worth it. + enum DispRange { + Disp12Only, + Disp12Pair, + Disp20Only, + Disp20Only128, + Disp20Pair + }; + DispRange DR; -/// SystemZDAGToDAGISel - SystemZ specific code to select SystemZ machine -/// instructions for SelectionDAG operations. 
-/// -namespace { - class SystemZDAGToDAGISel : public SelectionDAGISel { - const SystemZTargetLowering &Lowering; - const SystemZSubtarget &Subtarget; - - void getAddressOperandsRI(const SystemZRRIAddressMode &AM, - SDValue &Base, SDValue &Disp); - void getAddressOperands(const SystemZRRIAddressMode &AM, - SDValue &Base, SDValue &Disp, - SDValue &Index); - - public: - SystemZDAGToDAGISel(SystemZTargetMachine &TM, CodeGenOpt::Level OptLevel) - : SelectionDAGISel(TM, OptLevel), - Lowering(*TM.getTargetLowering()), - Subtarget(*TM.getSubtargetImpl()) { } - - virtual const char *getPassName() const { - return "SystemZ DAG->DAG Pattern Instruction Selection"; - } + // The parts of the address. The address is equivalent to: + // + // Base + Disp + Index + (IncludesDynAlloc ? ADJDYNALLOC : 0) + SDValue Base; + int64_t Disp; + SDValue Index; + bool IncludesDynAlloc; - /// getI8Imm - Return a target constant with the specified value, of type - /// i8. - inline SDValue getI8Imm(uint64_t Imm) { - return CurDAG->getTargetConstant(Imm, MVT::i8); - } + SystemZAddressingMode(AddrForm form, DispRange dr) + : Form(form), DR(dr), Base(), Disp(0), Index(), + IncludesDynAlloc(false) {} + + // True if the address can have an index register. + bool hasIndexField() { return Form != FormBD; } + + // True if the address can (and must) include ADJDYNALLOC. + bool isDynAlloc() { return Form == FormBDXDynAlloc; } + + void dump() { + errs() << "SystemZAddressingMode " << this << '\n'; - /// getI16Imm - Return a target constant with the specified value, of type - /// i16. - inline SDValue getI16Imm(uint64_t Imm) { - return CurDAG->getTargetConstant(Imm, MVT::i16); + errs() << " Base "; + if (Base.getNode() != 0) + Base.getNode()->dump(); + else + errs() << "null\n"; + + if (hasIndexField()) { + errs() << " Index "; + if (Index.getNode() != 0) + Index.getNode()->dump(); + else + errs() << "null\n"; } - /// getI32Imm - Return a target constant with the specified value, of type - /// i32. 
- inline SDValue getI32Imm(uint64_t Imm) { - return CurDAG->getTargetConstant(Imm, MVT::i32); + errs() << " Disp " << Disp; + if (IncludesDynAlloc) + errs() << " + ADJDYNALLOC"; + errs() << '\n'; + } +}; + +// Return a mask with Count low bits set. +static uint64_t allOnes(unsigned int Count) { + return Count == 0 ? 0 : (uint64_t(1) << (Count - 1) << 1) - 1; +} + +// Represents operands 2 to 5 of the ROTATE AND ... SELECTED BITS operation +// given by Opcode. The operands are: Input (R2), Start (I3), End (I4) and +// Rotate (I5). The combined operand value is effectively: +// +// (or (rotl Input, Rotate), ~Mask) +// +// for RNSBG and: +// +// (and (rotl Input, Rotate), Mask) +// +// otherwise. The output value has BitSize bits, although Input may be +// narrower (in which case the upper bits are don't care). +struct RxSBGOperands { + RxSBGOperands(unsigned Op, SDValue N) + : Opcode(Op), BitSize(N.getValueType().getSizeInBits()), + Mask(allOnes(BitSize)), Input(N), Start(64 - BitSize), End(63), + Rotate(0) {} + + unsigned Opcode; + unsigned BitSize; + uint64_t Mask; + SDValue Input; + unsigned Start; + unsigned End; + unsigned Rotate; +}; + +class SystemZDAGToDAGISel : public SelectionDAGISel { + const SystemZTargetLowering &Lowering; + const SystemZSubtarget &Subtarget; + + // Used by SystemZOperands.td to create integer constants. + inline SDValue getImm(const SDNode *Node, uint64_t Imm) const { + return CurDAG->getTargetConstant(Imm, Node->getValueType(0)); + } + + const SystemZTargetMachine &getTargetMachine() const { + return static_cast(TM); + } + + const SystemZInstrInfo *getInstrInfo() const { + return getTargetMachine().getInstrInfo(); + } + + // Try to fold more of the base or index of AM into AM, where IsBase + // selects between the base and index. + bool expandAddress(SystemZAddressingMode &AM, bool IsBase) const; + + // Try to describe N in AM, returning true on success. 
+ bool selectAddress(SDValue N, SystemZAddressingMode &AM) const; + + // Extract individual target operands from matched address AM. + void getAddressOperands(const SystemZAddressingMode &AM, EVT VT, + SDValue &Base, SDValue &Disp) const; + void getAddressOperands(const SystemZAddressingMode &AM, EVT VT, + SDValue &Base, SDValue &Disp, SDValue &Index) const; + + // Try to match Addr as a FormBD address with displacement type DR. + // Return true on success, storing the base and displacement in + // Base and Disp respectively. + bool selectBDAddr(SystemZAddressingMode::DispRange DR, SDValue Addr, + SDValue &Base, SDValue &Disp) const; + + // Try to match Addr as a FormBDX address with displacement type DR. + // Return true on success and if the result had no index. Store the + // base and displacement in Base and Disp respectively. + bool selectMVIAddr(SystemZAddressingMode::DispRange DR, SDValue Addr, + SDValue &Base, SDValue &Disp) const; + + // Try to match Addr as a FormBDX* address of form Form with + // displacement type DR. Return true on success, storing the base, + // displacement and index in Base, Disp and Index respectively. + bool selectBDXAddr(SystemZAddressingMode::AddrForm Form, + SystemZAddressingMode::DispRange DR, SDValue Addr, + SDValue &Base, SDValue &Disp, SDValue &Index) const; + + // PC-relative address matching routines used by SystemZOperands.td. + bool selectPCRelAddress(SDValue Addr, SDValue &Target) const { + if (SystemZISD::isPCREL(Addr.getOpcode())) { + Target = Addr.getOperand(0); + return true; } + return false; + } - // Include the pieces autogenerated from the target description. 
- #include "SystemZGenDAGISel.inc" - - private: - bool SelectAddrRI12Only(SDNode *Op, SDValue& Addr, - SDValue &Base, SDValue &Disp); - bool SelectAddrRI12(SDNode *Op, SDValue& Addr, - SDValue &Base, SDValue &Disp, - bool is12BitOnly = false); - bool SelectAddrRI(SDNode *Op, SDValue& Addr, - SDValue &Base, SDValue &Disp); - bool SelectAddrRRI12(SDNode *Op, SDValue Addr, - SDValue &Base, SDValue &Disp, SDValue &Index); - bool SelectAddrRRI20(SDNode *Op, SDValue Addr, - SDValue &Base, SDValue &Disp, SDValue &Index); - bool SelectLAAddr(SDNode *Op, SDValue Addr, - SDValue &Base, SDValue &Disp, SDValue &Index); - - SDNode *Select(SDNode *Node); - - bool TryFoldLoad(SDNode *P, SDValue N, - SDValue &Base, SDValue &Disp, SDValue &Index); - - bool MatchAddress(SDValue N, SystemZRRIAddressMode &AM, - bool is12Bit, unsigned Depth = 0); - bool MatchAddressBase(SDValue N, SystemZRRIAddressMode &AM); - }; -} // end anonymous namespace + // BD matching routines used by SystemZOperands.td. + bool selectBDAddr12Only(SDValue Addr, SDValue &Base, SDValue &Disp) const { + return selectBDAddr(SystemZAddressingMode::Disp12Only, Addr, Base, Disp); + } + bool selectBDAddr12Pair(SDValue Addr, SDValue &Base, SDValue &Disp) const { + return selectBDAddr(SystemZAddressingMode::Disp12Pair, Addr, Base, Disp); + } + bool selectBDAddr20Only(SDValue Addr, SDValue &Base, SDValue &Disp) const { + return selectBDAddr(SystemZAddressingMode::Disp20Only, Addr, Base, Disp); + } + bool selectBDAddr20Pair(SDValue Addr, SDValue &Base, SDValue &Disp) const { + return selectBDAddr(SystemZAddressingMode::Disp20Pair, Addr, Base, Disp); + } + + // MVI matching routines used by SystemZOperands.td. 
+ bool selectMVIAddr12Pair(SDValue Addr, SDValue &Base, SDValue &Disp) const { + return selectMVIAddr(SystemZAddressingMode::Disp12Pair, Addr, Base, Disp); + } + bool selectMVIAddr20Pair(SDValue Addr, SDValue &Base, SDValue &Disp) const { + return selectMVIAddr(SystemZAddressingMode::Disp20Pair, Addr, Base, Disp); + } + + // BDX matching routines used by SystemZOperands.td. + bool selectBDXAddr12Only(SDValue Addr, SDValue &Base, SDValue &Disp, + SDValue &Index) const { + return selectBDXAddr(SystemZAddressingMode::FormBDXNormal, + SystemZAddressingMode::Disp12Only, + Addr, Base, Disp, Index); + } + bool selectBDXAddr12Pair(SDValue Addr, SDValue &Base, SDValue &Disp, + SDValue &Index) const { + return selectBDXAddr(SystemZAddressingMode::FormBDXNormal, + SystemZAddressingMode::Disp12Pair, + Addr, Base, Disp, Index); + } + bool selectDynAlloc12Only(SDValue Addr, SDValue &Base, SDValue &Disp, + SDValue &Index) const { + return selectBDXAddr(SystemZAddressingMode::FormBDXDynAlloc, + SystemZAddressingMode::Disp12Only, + Addr, Base, Disp, Index); + } + bool selectBDXAddr20Only(SDValue Addr, SDValue &Base, SDValue &Disp, + SDValue &Index) const { + return selectBDXAddr(SystemZAddressingMode::FormBDXNormal, + SystemZAddressingMode::Disp20Only, + Addr, Base, Disp, Index); + } + bool selectBDXAddr20Only128(SDValue Addr, SDValue &Base, SDValue &Disp, + SDValue &Index) const { + return selectBDXAddr(SystemZAddressingMode::FormBDXNormal, + SystemZAddressingMode::Disp20Only128, + Addr, Base, Disp, Index); + } + bool selectBDXAddr20Pair(SDValue Addr, SDValue &Base, SDValue &Disp, + SDValue &Index) const { + return selectBDXAddr(SystemZAddressingMode::FormBDXNormal, + SystemZAddressingMode::Disp20Pair, + Addr, Base, Disp, Index); + } + bool selectLAAddr12Pair(SDValue Addr, SDValue &Base, SDValue &Disp, + SDValue &Index) const { + return selectBDXAddr(SystemZAddressingMode::FormBDXLA, + SystemZAddressingMode::Disp12Pair, + Addr, Base, Disp, Index); + } + bool 
selectLAAddr20Pair(SDValue Addr, SDValue &Base, SDValue &Disp, + SDValue &Index) const { + return selectBDXAddr(SystemZAddressingMode::FormBDXLA, + SystemZAddressingMode::Disp20Pair, + Addr, Base, Disp, Index); + } + + // Check whether (or Op (and X InsertMask)) is effectively an insertion + // of X into bits InsertMask of some Y != Op. Return true if so and + // set Op to that Y. + bool detectOrAndInsertion(SDValue &Op, uint64_t InsertMask) const; + + // Try to update RxSBG so that only the bits of RxSBG.Input in Mask are used. + // Return true on success. + bool refineRxSBGMask(RxSBGOperands &RxSBG, uint64_t Mask) const; + + // Try to fold some of RxSBG.Input into other fields of RxSBG. + // Return true on success. + bool expandRxSBG(RxSBGOperands &RxSBG) const; + + // Return an undefined value of type VT. + SDValue getUNDEF(SDLoc DL, EVT VT) const; + + // Convert N to VT, if it isn't already. + SDValue convertTo(SDLoc DL, EVT VT, SDValue N) const; + + // Try to implement AND or shift node N using RISBG with the zero flag set. + // Return the selected node on success, otherwise return null. + SDNode *tryRISBGZero(SDNode *N); + + // Try to use RISBG or Opcode to implement OR or XOR node N. + // Return the selected node on success, otherwise return null. + SDNode *tryRxSBG(SDNode *N, unsigned Opcode); + + // If Op0 is null, then Node is a constant that can be loaded using: + // + // (Opcode UpperVal LowerVal) + // + // If Op0 is nonnull, then Node can be implemented using: + // + // (Opcode (Opcode Op0 UpperVal) LowerVal) + SDNode *splitLargeImmediate(unsigned Opcode, SDNode *Node, SDValue Op0, + uint64_t UpperVal, uint64_t LowerVal); + + // Return true if Load and Store are loads and stores of the same size + // and are guaranteed not to overlap. Such operations can be implemented + // using block (SS-format) instructions. 
+ // + // Partial overlap would lead to incorrect code, since the block operations + // are logically bytewise, even though they have a fast path for the + // non-overlapping case. We also need to avoid full overlap (i.e. two + // addresses that might be equal at run time) because although that case + // would be handled correctly, it might be implemented by millicode. + bool canUseBlockOperation(StoreSDNode *Store, LoadSDNode *Load) const; + + // N is a (store (load Y), X) pattern. Return true if it can use an MVC + // from Y to X. + bool storeLoadCanUseMVC(SDNode *N) const; + + // N is a (store (op (load A[0]), (load A[1])), X) pattern. Return true + // if A[1 - I] == X and if N can use a block operation like NC from A[I] + // to X. + bool storeLoadCanUseBlockBinary(SDNode *N, unsigned I) const; + +public: + SystemZDAGToDAGISel(SystemZTargetMachine &TM, CodeGenOpt::Level OptLevel) + : SelectionDAGISel(TM, OptLevel), + Lowering(*TM.getTargetLowering()), + Subtarget(*TM.getSubtargetImpl()) { } + + // Override MachineFunctionPass. + virtual const char *getPassName() const LLVM_OVERRIDE { + return "SystemZ DAG->DAG Pattern Instruction Selection"; + } + + // Override SelectionDAGISel. + virtual SDNode *Select(SDNode *Node) LLVM_OVERRIDE; + virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op, + char ConstraintCode, + std::vector &OutOps) + LLVM_OVERRIDE; + + // Include the pieces autogenerated from the target description. + #include "SystemZGenDAGISel.inc" +}; +} // end anonymous namespace -/// createSystemZISelDag - This pass converts a legalized DAG into a -/// SystemZ-specific DAG, ready for instruction scheduling. 
-/// FunctionPass *llvm::createSystemZISelDag(SystemZTargetMachine &TM, - CodeGenOpt::Level OptLevel) { + CodeGenOpt::Level OptLevel) { return new SystemZDAGToDAGISel(TM, OptLevel); } -/// isImmSExt20 - This method tests to see if the node is either a 32-bit -/// or 64-bit immediate, and if the value can be accurately represented as a -/// sign extension from a 20-bit value. If so, this returns true and the -/// immediate. -static bool isImmSExt20(int64_t Val, int64_t &Imm) { - if (Val >= -524288 && Val <= 524287) { - Imm = Val; +// Return true if Val should be selected as a displacement for an address +// with range DR. Here we're interested in the range of both the instruction +// described by DR and of any pairing instruction. +static bool selectDisp(SystemZAddressingMode::DispRange DR, int64_t Val) { + switch (DR) { + case SystemZAddressingMode::Disp12Only: + return isUInt<12>(Val); + + case SystemZAddressingMode::Disp12Pair: + case SystemZAddressingMode::Disp20Only: + case SystemZAddressingMode::Disp20Pair: + return isInt<20>(Val); + + case SystemZAddressingMode::Disp20Only128: + return isInt<20>(Val) && isInt<20>(Val + 8); + } + llvm_unreachable("Unhandled displacement range"); +} + +// Change the base or index in AM to Value, where IsBase selects +// between the base and index. +static void changeComponent(SystemZAddressingMode &AM, bool IsBase, + SDValue Value) { + if (IsBase) + AM.Base = Value; + else + AM.Index = Value; +} + +// The base or index of AM is equivalent to Value + ADJDYNALLOC, +// where IsBase selects between the base and index. Try to fold the +// ADJDYNALLOC into AM. 
+static bool expandAdjDynAlloc(SystemZAddressingMode &AM, bool IsBase, + SDValue Value) { + if (AM.isDynAlloc() && !AM.IncludesDynAlloc) { + changeComponent(AM, IsBase, Value); + AM.IncludesDynAlloc = true; return true; } return false; } -/// isImmZExt12 - This method tests to see if the node is either a 32-bit -/// or 64-bit immediate, and if the value can be accurately represented as a -/// zero extension from a 12-bit value. If so, this returns true and the -/// immediate. -static bool isImmZExt12(int64_t Val, int64_t &Imm) { - if (Val >= 0 && Val <= 0xFFF) { - Imm = Val; +// The base of AM is equivalent to Base + Index. Try to use Index as +// the index register. +static bool expandIndex(SystemZAddressingMode &AM, SDValue Base, + SDValue Index) { + if (AM.hasIndexField() && !AM.Index.getNode()) { + AM.Base = Base; + AM.Index = Index; return true; } return false; } -/// MatchAddress - Add the specified node to the specified addressing mode, -/// returning true if it cannot be done. This just pattern matches for the -/// addressing mode. -bool SystemZDAGToDAGISel::MatchAddress(SDValue N, SystemZRRIAddressMode &AM, - bool is12Bit, unsigned Depth) { - DebugLoc dl = N.getDebugLoc(); - DEBUG(errs() << "MatchAddress: "; AM.dump()); - // Limit recursion. - if (Depth > 5) - return MatchAddressBase(N, AM); - - // FIXME: We can perform better here. If we have something like - // (shift (add A, imm), N), we can try to reassociate stuff and fold shift of - // imm into addressing mode. - switch (N.getOpcode()) { - default: break; - case ISD::Constant: { - int64_t Val = cast(N)->getSExtValue(); - int64_t Imm = 0; - bool Match = (is12Bit ? - isImmZExt12(AM.Disp + Val, Imm) : - isImmSExt20(AM.Disp + Val, Imm)); - if (Match) { - AM.Disp = Imm; - return false; - } - break; +// The base or index of AM is equivalent to Op0 + Op1, where IsBase selects +// between the base and index. Try to fold Op1 into AM's displacement. 
+static bool expandDisp(SystemZAddressingMode &AM, bool IsBase, + SDValue Op0, uint64_t Op1) { + // First try adjusting the displacement. + int64_t TestDisp = AM.Disp + Op1; + if (selectDisp(AM.DR, TestDisp)) { + changeComponent(AM, IsBase, Op0); + AM.Disp = TestDisp; + return true; } - case ISD::FrameIndex: - if (AM.BaseType == SystemZRRIAddressMode::RegBase && - AM.Base.Reg.getNode() == 0) { - AM.BaseType = SystemZRRIAddressMode::FrameIndexBase; - AM.Base.FrameIndex = cast(N)->getIndex(); - return false; - } - break; + // We could consider forcing the displacement into a register and + // using it as an index, but it would need to be carefully tuned. + return false; +} - case ISD::SUB: { - // Given A-B, if A can be completely folded into the address and - // the index field with the index field unused, use -B as the index. - // This is a win if a has multiple parts that can be folded into - // the address. Also, this saves a mov if the base register has - // other uses, since it avoids a two-address sub instruction, however - // it costs an additional mov if the index register has other uses. - - // Test if the LHS of the sub can be folded. - SystemZRRIAddressMode Backup = AM; - if (MatchAddress(N.getNode()->getOperand(0), AM, is12Bit, Depth+1)) { - AM = Backup; - break; - } - // Test if the index field is free for use. - if (AM.IndexReg.getNode() || AM.isRI) { - AM = Backup; - break; - } +bool SystemZDAGToDAGISel::expandAddress(SystemZAddressingMode &AM, + bool IsBase) const { + SDValue N = IsBase ? 
AM.Base : AM.Index; + unsigned Opcode = N.getOpcode(); + if (Opcode == ISD::TRUNCATE) { + N = N.getOperand(0); + Opcode = N.getOpcode(); + } + if (Opcode == ISD::ADD || CurDAG->isBaseWithConstantOffset(N)) { + SDValue Op0 = N.getOperand(0); + SDValue Op1 = N.getOperand(1); + + unsigned Op0Code = Op0->getOpcode(); + unsigned Op1Code = Op1->getOpcode(); + + if (Op0Code == SystemZISD::ADJDYNALLOC) + return expandAdjDynAlloc(AM, IsBase, Op1); + if (Op1Code == SystemZISD::ADJDYNALLOC) + return expandAdjDynAlloc(AM, IsBase, Op0); + + if (Op0Code == ISD::Constant) + return expandDisp(AM, IsBase, Op1, + cast(Op0)->getSExtValue()); + if (Op1Code == ISD::Constant) + return expandDisp(AM, IsBase, Op0, + cast(Op1)->getSExtValue()); + + if (IsBase && expandIndex(AM, Op0, Op1)) + return true; + } + if (Opcode == SystemZISD::PCREL_OFFSET) { + SDValue Full = N.getOperand(0); + SDValue Base = N.getOperand(1); + SDValue Anchor = Base.getOperand(0); + uint64_t Offset = (cast(Full)->getOffset() - + cast(Anchor)->getOffset()); + return expandDisp(AM, IsBase, Base, Offset); + } + return false; +} - // If the base is a register with multiple uses, this transformation may - // save a mov. Otherwise it's probably better not to do it. - if (AM.BaseType == SystemZRRIAddressMode::RegBase && - (!AM.Base.Reg.getNode() || AM.Base.Reg.getNode()->hasOneUse())) { - AM = Backup; - break; - } +// Return true if an instruction with displacement range DR should be +// used for displacement value Val. selectDisp(DR, Val) must already hold. +static bool isValidDisp(SystemZAddressingMode::DispRange DR, int64_t Val) { + assert(selectDisp(DR, Val) && "Invalid displacement"); + switch (DR) { + case SystemZAddressingMode::Disp12Only: + case SystemZAddressingMode::Disp20Only: + case SystemZAddressingMode::Disp20Only128: + return true; - // Ok, the transformation is legal and appears profitable. Go for it. 
- SDValue RHS = N.getNode()->getOperand(1); - SDValue Zero = CurDAG->getConstant(0, N.getValueType()); - SDValue Neg = CurDAG->getNode(ISD::SUB, dl, N.getValueType(), Zero, RHS); - AM.IndexReg = Neg; - - // Insert the new nodes into the topological ordering. - if (Zero.getNode()->getNodeId() == -1 || - Zero.getNode()->getNodeId() > N.getNode()->getNodeId()) { - CurDAG->RepositionNode(N.getNode(), Zero.getNode()); - Zero.getNode()->setNodeId(N.getNode()->getNodeId()); - } - if (Neg.getNode()->getNodeId() == -1 || - Neg.getNode()->getNodeId() > N.getNode()->getNodeId()) { - CurDAG->RepositionNode(N.getNode(), Neg.getNode()); - Neg.getNode()->setNodeId(N.getNode()->getNodeId()); - } - return false; + case SystemZAddressingMode::Disp12Pair: + // Use the other instruction if the displacement is too large. + return isUInt<12>(Val); + + case SystemZAddressingMode::Disp20Pair: + // Use the other instruction if the displacement is small enough. + return !isUInt<12>(Val); } + llvm_unreachable("Unhandled displacement range"); +} + +// Return true if Base + Disp + Index should be performed by LA(Y). +static bool shouldUseLA(SDNode *Base, int64_t Disp, SDNode *Index) { + // Don't use LA(Y) for constants. + if (!Base) + return false; - case ISD::ADD: { - SystemZRRIAddressMode Backup = AM; - if (!MatchAddress(N.getNode()->getOperand(0), AM, is12Bit, Depth+1) && - !MatchAddress(N.getNode()->getOperand(1), AM, is12Bit, Depth+1)) + // Always use LA(Y) for frame addresses, since we know that the destination + // register is almost always (perhaps always) going to be different from + // the frame register. + if (Base->getOpcode() == ISD::FrameIndex) + return true; + + if (Disp) { + // Always use LA(Y) if there is a base, displacement and index. + if (Index) + return true; + + // Always use LA if the displacement is small enough. It should always + // be no worse than AGHI (and better if it avoids a move). 
+ if (isUInt<12>(Disp)) + return true; + + // For similar reasons, always use LAY if the constant is too big for AGHI. + // LAY should be no worse than AGFI. + if (!isInt<16>(Disp)) + return true; + } else { + // Don't use LA for plain registers. + if (!Index) return false; - AM = Backup; - if (!MatchAddress(N.getNode()->getOperand(1), AM, is12Bit, Depth+1) && - !MatchAddress(N.getNode()->getOperand(0), AM, is12Bit, Depth+1)) + + // Don't use LA for plain addition if the index operand is only used + // once. It should be a natural two-operand addition in that case. + if (Index->hasOneUse()) return false; - AM = Backup; - - // If we couldn't fold both operands into the address at the same time, - // see if we can just put each operand into a register and fold at least - // the add. - if (!AM.isRI && - AM.BaseType == SystemZRRIAddressMode::RegBase && - !AM.Base.Reg.getNode() && !AM.IndexReg.getNode()) { - AM.Base.Reg = N.getNode()->getOperand(0); - AM.IndexReg = N.getNode()->getOperand(1); + + // Prefer addition if the second operation is sign-extended, in the + // hope of using AGF. + unsigned IndexOpcode = Index->getOpcode(); + if (IndexOpcode == ISD::SIGN_EXTEND || + IndexOpcode == ISD::SIGN_EXTEND_INREG) return false; - } - break; } - case ISD::OR: - // Handle "X | C" as "X + C" iff X is known to have C bits clear. - if (ConstantSDNode *CN = dyn_cast(N.getOperand(1))) { - SystemZRRIAddressMode Backup = AM; - int64_t Offset = CN->getSExtValue(); - int64_t Imm = 0; - bool MatchOffset = (is12Bit ? - isImmZExt12(AM.Disp + Offset, Imm) : - isImmSExt20(AM.Disp + Offset, Imm)); - // The resultant disp must fit in 12 or 20-bits. - if (MatchOffset && - // LHS should be an addr mode. - !MatchAddress(N.getOperand(0), AM, is12Bit, Depth+1) && - // Check to see if the LHS & C is zero. 
- CurDAG->MaskedValueIsZero(N.getOperand(0), CN->getAPIntValue())) { - AM.Disp = Imm; - return false; - } - AM = Backup; - } - break; - } + // Don't use LA for two-operand addition if either operand is only + // used once. The addition instructions are better in that case. + if (Base->hasOneUse()) + return false; - return MatchAddressBase(N, AM); + return true; } -/// MatchAddressBase - Helper for MatchAddress. Add the specified node to the -/// specified addressing mode without any further recursion. -bool SystemZDAGToDAGISel::MatchAddressBase(SDValue N, - SystemZRRIAddressMode &AM) { - // Is the base register already occupied? - if (AM.BaseType != SystemZRRIAddressMode::RegBase || AM.Base.Reg.getNode()) { - // If so, check to see if the index register is set. - if (AM.IndexReg.getNode() == 0 && !AM.isRI) { - AM.IndexReg = N; - return false; - } +// Return true if Addr is suitable for AM, updating AM if so. +bool SystemZDAGToDAGISel::selectAddress(SDValue Addr, + SystemZAddressingMode &AM) const { + // Start out assuming that the address will need to be loaded separately, + // then try to extend it as much as we can. + AM.Base = Addr; + + // First try treating the address as a constant. + if (Addr.getOpcode() == ISD::Constant && + expandDisp(AM, true, SDValue(), + cast(Addr)->getSExtValue())) + ; + else + // Otherwise try expanding each component. + while (expandAddress(AM, true) || + (AM.Index.getNode() && expandAddress(AM, false))) + continue; + + // Reject cases where it isn't profitable to use LA(Y). + if (AM.Form == SystemZAddressingMode::FormBDXLA && + !shouldUseLA(AM.Base.getNode(), AM.Disp, AM.Index.getNode())) + return false; - // Otherwise, we cannot select it. - return true; - } + // Reject cases where the other instruction in a pair should be used. + if (!isValidDisp(AM.DR, AM.Disp)) + return false; - // Default, generate it as a register. 
- AM.BaseType = SystemZRRIAddressMode::RegBase; - AM.Base.Reg = N; - return false; + // Make sure that ADJDYNALLOC is included where necessary. + if (AM.isDynAlloc() && !AM.IncludesDynAlloc) + return false; + + DEBUG(AM.dump()); + return true; } -void SystemZDAGToDAGISel::getAddressOperandsRI(const SystemZRRIAddressMode &AM, - SDValue &Base, SDValue &Disp) { - if (AM.BaseType == SystemZRRIAddressMode::RegBase) - Base = AM.Base.Reg; - else - Base = CurDAG->getTargetFrameIndex(AM.Base.FrameIndex, TLI.getPointerTy()); - Disp = CurDAG->getTargetConstant(AM.Disp, MVT::i64); +// Insert a node into the DAG at least before Pos. This will reposition +// the node as needed, and will assign it a node ID that is <= Pos's ID. +// Note that this does *not* preserve the uniqueness of node IDs! +// The selection DAG must no longer depend on their uniqueness when this +// function is used. +static void insertDAGNode(SelectionDAG *DAG, SDNode *Pos, SDValue N) { + if (N.getNode()->getNodeId() == -1 || + N.getNode()->getNodeId() > Pos->getNodeId()) { + DAG->RepositionNode(Pos, N.getNode()); + N.getNode()->setNodeId(Pos->getNodeId()); + } } -void SystemZDAGToDAGISel::getAddressOperands(const SystemZRRIAddressMode &AM, - SDValue &Base, SDValue &Disp, - SDValue &Index) { - getAddressOperandsRI(AM, Base, Disp); - Index = AM.IndexReg; +void SystemZDAGToDAGISel::getAddressOperands(const SystemZAddressingMode &AM, + EVT VT, SDValue &Base, + SDValue &Disp) const { + Base = AM.Base; + if (!Base.getNode()) + // Register 0 means "no base". This is mostly useful for shifts. + Base = CurDAG->getRegister(0, VT); + else if (Base.getOpcode() == ISD::FrameIndex) { + // Lower a FrameIndex to a TargetFrameIndex. + int64_t FrameIndex = cast(Base)->getIndex(); + Base = CurDAG->getTargetFrameIndex(FrameIndex, VT); + } else if (Base.getValueType() != VT) { + // Truncate values from i64 to i32, for shifts. 
+ assert(VT == MVT::i32 && Base.getValueType() == MVT::i64 && + "Unexpected truncation"); + SDLoc DL(Base); + SDValue Trunc = CurDAG->getNode(ISD::TRUNCATE, DL, VT, Base); + insertDAGNode(CurDAG, Base.getNode(), Trunc); + Base = Trunc; + } + + // Lower the displacement to a TargetConstant. + Disp = CurDAG->getTargetConstant(AM.Disp, VT); } -/// Returns true if the address can be represented by a base register plus -/// an unsigned 12-bit displacement [r+imm]. -bool SystemZDAGToDAGISel::SelectAddrRI12Only(SDNode *Op, SDValue& Addr, - SDValue &Base, SDValue &Disp) { - return SelectAddrRI12(Op, Addr, Base, Disp, /*is12BitOnly*/true); +void SystemZDAGToDAGISel::getAddressOperands(const SystemZAddressingMode &AM, + EVT VT, SDValue &Base, + SDValue &Disp, + SDValue &Index) const { + getAddressOperands(AM, VT, Base, Disp); + + Index = AM.Index; + if (!Index.getNode()) + // Register 0 means "no index". + Index = CurDAG->getRegister(0, VT); } -bool SystemZDAGToDAGISel::SelectAddrRI12(SDNode *Op, SDValue& Addr, - SDValue &Base, SDValue &Disp, - bool is12BitOnly) { - SystemZRRIAddressMode AM20(/*isRI*/true), AM12(/*isRI*/true); - bool Done = false; - - if (!Addr.hasOneUse()) { - unsigned Opcode = Addr.getOpcode(); - if (Opcode != ISD::Constant && Opcode != ISD::FrameIndex) { - // If we are able to fold N into addressing mode, then we'll allow it even - // if N has multiple uses. In general, addressing computation is used as - // addresses by all of its uses. But watch out for CopyToReg uses, that - // means the address computation is liveout. It will be computed by a LA - // so we want to avoid computing the address twice. 
- for (SDNode::use_iterator UI = Addr.getNode()->use_begin(), - UE = Addr.getNode()->use_end(); UI != UE; ++UI) { - if (UI->getOpcode() == ISD::CopyToReg) { - MatchAddressBase(Addr, AM12); - Done = true; - break; - } - } - } - } - if (!Done && MatchAddress(Addr, AM12, /* is12Bit */ true)) +bool SystemZDAGToDAGISel::selectBDAddr(SystemZAddressingMode::DispRange DR, + SDValue Addr, SDValue &Base, + SDValue &Disp) const { + SystemZAddressingMode AM(SystemZAddressingMode::FormBD, DR); + if (!selectAddress(Addr, AM)) return false; - // Check, whether we can match stuff using 20-bit displacements - if (!Done && !is12BitOnly && - !MatchAddress(Addr, AM20, /* is12Bit */ false)) - if (AM12.Disp == 0 && AM20.Disp != 0) - return false; - - DEBUG(errs() << "MatchAddress (final): "; AM12.dump()); + getAddressOperands(AM, Addr.getValueType(), Base, Disp); + return true; +} - EVT VT = Addr.getValueType(); - if (AM12.BaseType == SystemZRRIAddressMode::RegBase) { - if (!AM12.Base.Reg.getNode()) - AM12.Base.Reg = CurDAG->getRegister(0, VT); - } +bool SystemZDAGToDAGISel::selectMVIAddr(SystemZAddressingMode::DispRange DR, + SDValue Addr, SDValue &Base, + SDValue &Disp) const { + SystemZAddressingMode AM(SystemZAddressingMode::FormBDXNormal, DR); + if (!selectAddress(Addr, AM) || AM.Index.getNode()) + return false; - assert(AM12.IndexReg.getNode() == 0 && "Invalid reg-imm address mode!"); + getAddressOperands(AM, Addr.getValueType(), Base, Disp); + return true; +} - getAddressOperandsRI(AM12, Base, Disp); +bool SystemZDAGToDAGISel::selectBDXAddr(SystemZAddressingMode::AddrForm Form, + SystemZAddressingMode::DispRange DR, + SDValue Addr, SDValue &Base, + SDValue &Disp, SDValue &Index) const { + SystemZAddressingMode AM(Form, DR); + if (!selectAddress(Addr, AM)) + return false; + getAddressOperands(AM, Addr.getValueType(), Base, Disp, Index); return true; } -/// Returns true if the address can be represented by a base register plus -/// a signed 20-bit displacement [r+imm]. 
-bool SystemZDAGToDAGISel::SelectAddrRI(SDNode *Op, SDValue& Addr,
-                                       SDValue &Base, SDValue &Disp) {
-  SystemZRRIAddressMode AM(/*isRI*/true);
-  bool Done = false;
-
-  if (!Addr.hasOneUse()) {
-    unsigned Opcode = Addr.getOpcode();
-    if (Opcode != ISD::Constant && Opcode != ISD::FrameIndex) {
-      // If we are able to fold N into addressing mode, then we'll allow it even
-      // if N has multiple uses. In general, addressing computation is used as
-      // addresses by all of its uses. But watch out for CopyToReg uses, that
-      // means the address computation is liveout. It will be computed by a LA
-      // so we want to avoid computing the address twice.
-      for (SDNode::use_iterator UI = Addr.getNode()->use_begin(),
-             UE = Addr.getNode()->use_end(); UI != UE; ++UI) {
-        if (UI->getOpcode() == ISD::CopyToReg) {
-          MatchAddressBase(Addr, AM);
-          Done = true;
-          break;
-        }
-      }
-    }
-  }
-  if (!Done && MatchAddress(Addr, AM, /* is12Bit */ false))
+bool SystemZDAGToDAGISel::detectOrAndInsertion(SDValue &Op,
+                                               uint64_t InsertMask) const {
+  // We're only interested in cases where the insertion is into some operand
+  // of Op, rather than into Op itself.  The only useful case is an AND.
+  if (Op.getOpcode() != ISD::AND)
     return false;
 
-  DEBUG(errs() << "MatchAddress (final): "; AM.dump());
+  // We need a constant mask.
+  ConstantSDNode *MaskNode =
+    dyn_cast<ConstantSDNode>(Op.getOperand(1).getNode());
+  if (!MaskNode)
+    return false;
+
+  // It's not an insertion of Op.getOperand(0) if the two masks overlap.
+  uint64_t AndMask = MaskNode->getZExtValue();
+  if (InsertMask & AndMask)
+    return false;
 
-  EVT VT = Addr.getValueType();
-  if (AM.BaseType == SystemZRRIAddressMode::RegBase) {
-    if (!AM.Base.Reg.getNode())
-      AM.Base.Reg = CurDAG->getRegister(0, VT);
+  // It's only an insertion if all bits are covered or are known to be zero.
+  // The inner check covers all cases but is more expensive.
+ uint64_t Used = allOnes(Op.getValueType().getSizeInBits()); + if (Used != (AndMask | InsertMask)) { + APInt KnownZero, KnownOne; + CurDAG->ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne); + if (Used != (AndMask | InsertMask | KnownZero.getZExtValue())) + return false; } - assert(AM.IndexReg.getNode() == 0 && "Invalid reg-imm address mode!"); + Op = Op.getOperand(0); + return true; +} - getAddressOperandsRI(AM, Base, Disp); +bool SystemZDAGToDAGISel::refineRxSBGMask(RxSBGOperands &RxSBG, + uint64_t Mask) const { + const SystemZInstrInfo *TII = getInstrInfo(); + if (RxSBG.Rotate != 0) + Mask = (Mask << RxSBG.Rotate) | (Mask >> (64 - RxSBG.Rotate)); + Mask &= RxSBG.Mask; + if (TII->isRxSBGMask(Mask, RxSBG.BitSize, RxSBG.Start, RxSBG.End)) { + RxSBG.Mask = Mask; + return true; + } + return false; +} - return true; +// RxSBG.Input is a shift of Count bits in the direction given by IsLeft. +// Return true if the result depends on the signs or zeros that are +// shifted in. +static bool shiftedInBitsMatter(RxSBGOperands &RxSBG, uint64_t Count, + bool IsLeft) { + // Work out which bits of the shift result are zeros or sign copies. + uint64_t ShiftedIn = allOnes(Count); + if (!IsLeft) + ShiftedIn <<= RxSBG.BitSize - Count; + + // Rotate that mask in the same way as RxSBG.Input is rotated. + if (RxSBG.Rotate != 0) + ShiftedIn = ((ShiftedIn << RxSBG.Rotate) | + (ShiftedIn >> (64 - RxSBG.Rotate))); + + // Fail if any of the zero or sign bits are used. + return (ShiftedIn & RxSBG.Mask) != 0; } -/// Returns true if the address can be represented by a base register plus -/// index register plus an unsigned 12-bit displacement [base + idx + imm]. 
-bool SystemZDAGToDAGISel::SelectAddrRRI12(SDNode *Op, SDValue Addr,
-                SDValue &Base, SDValue &Disp, SDValue &Index) {
-  SystemZRRIAddressMode AM20, AM12;
-  bool Done = false;
-
-  if (!Addr.hasOneUse()) {
-    unsigned Opcode = Addr.getOpcode();
-    if (Opcode != ISD::Constant && Opcode != ISD::FrameIndex) {
-      // If we are able to fold N into addressing mode, then we'll allow it even
-      // if N has multiple uses. In general, addressing computation is used as
-      // addresses by all of its uses. But watch out for CopyToReg uses, that
-      // means the address computation is liveout. It will be computed by a LA
-      // so we want to avoid computing the address twice.
-      for (SDNode::use_iterator UI = Addr.getNode()->use_begin(),
-             UE = Addr.getNode()->use_end(); UI != UE; ++UI) {
-        if (UI->getOpcode() == ISD::CopyToReg) {
-          MatchAddressBase(Addr, AM12);
-          Done = true;
-          break;
-        }
-      }
+bool SystemZDAGToDAGISel::expandRxSBG(RxSBGOperands &RxSBG) const {
+  SDValue N = RxSBG.Input;
+  unsigned Opcode = N.getOpcode();
+  switch (Opcode) {
+  case ISD::AND: {
+    if (RxSBG.Opcode == SystemZ::RNSBG)
+      return false;
+
+    ConstantSDNode *MaskNode =
+      dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
+    if (!MaskNode)
+      return false;
+
+    SDValue Input = N.getOperand(0);
+    uint64_t Mask = MaskNode->getZExtValue();
+    if (!refineRxSBGMask(RxSBG, Mask)) {
+      // If some bits of Input are already known zeros, those bits will have
+      // been removed from the mask.  See if adding them back in makes the
+      // mask suitable.
+      APInt KnownZero, KnownOne;
+      CurDAG->ComputeMaskedBits(Input, KnownZero, KnownOne);
+      Mask |= KnownZero.getZExtValue();
+      if (!refineRxSBGMask(RxSBG, Mask))
+        return false;
     }
+    RxSBG.Input = Input;
+    return true;
   }
-  if (!Done && MatchAddress(Addr, AM12, /* is12Bit */ true))
-    return false;
 
-  // Check, whether we can match stuff using 20-bit displacements
-  if (!Done && !MatchAddress(Addr, AM20, /* is12Bit */ false))
-    if (AM12.Disp == 0 && AM20.Disp != 0)
+  case ISD::OR: {
+    if (RxSBG.Opcode != SystemZ::RNSBG)
       return false;
 
-  DEBUG(errs() << "MatchAddress (final): "; AM12.dump());
+    ConstantSDNode *MaskNode =
+      dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
+    if (!MaskNode)
+      return false;
 
-  EVT VT = Addr.getValueType();
-  if (AM12.BaseType == SystemZRRIAddressMode::RegBase) {
-    if (!AM12.Base.Reg.getNode())
-      AM12.Base.Reg = CurDAG->getRegister(0, VT);
+    SDValue Input = N.getOperand(0);
+    uint64_t Mask = ~MaskNode->getZExtValue();
+    if (!refineRxSBGMask(RxSBG, Mask)) {
+      // If some bits of Input are already known ones, those bits will have
+      // been removed from the mask.  See if adding them back in makes the
+      // mask suitable.
+      APInt KnownZero, KnownOne;
+      CurDAG->ComputeMaskedBits(Input, KnownZero, KnownOne);
+      Mask &= ~KnownOne.getZExtValue();
+      if (!refineRxSBGMask(RxSBG, Mask))
+        return false;
+    }
+    RxSBG.Input = Input;
+    return true;
   }
 
-  if (!AM12.IndexReg.getNode())
-    AM12.IndexReg = CurDAG->getRegister(0, VT);
+  case ISD::ROTL: {
+    // Any 64-bit rotate left can be merged into the RxSBG.
+    if (RxSBG.BitSize != 64 || N.getValueType() != MVT::i64)
+      return false;
+    ConstantSDNode *CountNode
+      = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
+    if (!CountNode)
+      return false;
 
-  getAddressOperands(AM12, Base, Disp, Index);
+    RxSBG.Rotate = (RxSBG.Rotate + CountNode->getZExtValue()) & 63;
+    RxSBG.Input = N.getOperand(0);
+    return true;
+  }
+
+  case ISD::SIGN_EXTEND:
+  case ISD::ZERO_EXTEND:
+  case ISD::ANY_EXTEND: {
+    // Check that the extension bits are don't-care (i.e.
are masked out
+    // by the final mask).
+    unsigned InnerBitSize = N.getOperand(0).getValueType().getSizeInBits();
+    if (shiftedInBitsMatter(RxSBG, RxSBG.BitSize - InnerBitSize, false))
+      return false;
 
-  return true;
-}
+    RxSBG.Input = N.getOperand(0);
+    return true;
+  }
 
-/// Returns true if the address can be represented by a base register plus
-/// index register plus a signed 20-bit displacement [base + idx + imm].
-bool SystemZDAGToDAGISel::SelectAddrRRI20(SDNode *Op, SDValue Addr,
-                SDValue &Base, SDValue &Disp, SDValue &Index) {
-  SystemZRRIAddressMode AM;
-  bool Done = false;
-
-  if (!Addr.hasOneUse()) {
-    unsigned Opcode = Addr.getOpcode();
-    if (Opcode != ISD::Constant && Opcode != ISD::FrameIndex) {
-      // If we are able to fold N into addressing mode, then we'll allow it even
-      // if N has multiple uses. In general, addressing computation is used as
-      // addresses by all of its uses. But watch out for CopyToReg uses, that
-      // means the address computation is liveout. It will be computed by a LA
-      // so we want to avoid computing the address twice.
-      for (SDNode::use_iterator UI = Addr.getNode()->use_begin(),
-             UE = Addr.getNode()->use_end(); UI != UE; ++UI) {
-        if (UI->getOpcode() == ISD::CopyToReg) {
-          MatchAddressBase(Addr, AM);
-          Done = true;
-          break;
-        }
-      }
+  case ISD::SHL: {
+    ConstantSDNode *CountNode =
+      dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
+    if (!CountNode)
+      return false;
+
+    uint64_t Count = CountNode->getZExtValue();
+    unsigned BitSize = N.getValueType().getSizeInBits();
+    if (Count < 1 || Count >= BitSize)
+      return false;
+
+    if (RxSBG.Opcode == SystemZ::RNSBG) {
+      // Treat (shl X, count) as (rotl X, size-count) as long as the bottom
+      // count bits from RxSBG.Input are ignored.
+      if (shiftedInBitsMatter(RxSBG, Count, true))
+        return false;
+    } else {
+      // Treat (shl X, count) as (and (rotl X, count), ~0<<count).
+      if (!refineRxSBGMask(RxSBG, allOnes(BitSize - Count) << Count))
+        return false;
+    }
+
+    RxSBG.Rotate = (RxSBG.Rotate + Count) & 63;
+    RxSBG.Input = N.getOperand(0);
+    return true;
+  }
+
+  case ISD::SRL:
+  case ISD::SRA: {
+    ConstantSDNode *CountNode =
+      dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
+    if (!CountNode)
+      return false;
 
-  EVT VT = Addr.getValueType();
-  if (AM.BaseType == SystemZRRIAddressMode::RegBase) {
-    if (!AM.Base.Reg.getNode())
-      AM.Base.Reg = CurDAG->getRegister(0, VT);
+    uint64_t Count = CountNode->getZExtValue();
+    unsigned BitSize = N.getValueType().getSizeInBits();
+    if (Count < 1 || Count >= BitSize)
+      return false;
+
+    if (RxSBG.Opcode == SystemZ::RNSBG || Opcode == ISD::SRA) {
+      // Treat (srl|sra X, count) as (rotl X, size-count) as long as the top
+      // count bits from RxSBG.Input are ignored.
+      if (shiftedInBitsMatter(RxSBG, Count, false))
+        return false;
+    } else {
+      // Treat (srl X, count), mask) as (and (rotl X, size-count), ~0>>count),
+      // which is similar to SLL above.
+      if (!refineRxSBGMask(RxSBG, allOnes(BitSize - Count)))
+        return false;
+    }
+
+    RxSBG.Rotate = (RxSBG.Rotate - Count) & 63;
+    RxSBG.Input = N.getOperand(0);
+    return true;
+  }
+  default:
+    return false;
+  }
+}
 
-  if (!AM.IndexReg.getNode())
-    AM.IndexReg = CurDAG->getRegister(0, VT);
+SDValue SystemZDAGToDAGISel::getUNDEF(SDLoc DL, EVT VT) const {
+  SDNode *N = CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, VT);
+  return SDValue(N, 0);
+}
 
-  getAddressOperands(AM, Base, Disp, Index);
+SDValue SystemZDAGToDAGISel::convertTo(SDLoc DL, EVT VT, SDValue N) const {
+  if (N.getValueType() == MVT::i32 && VT == MVT::i64)
+    return CurDAG->getTargetInsertSubreg(SystemZ::subreg_l32,
+                                         DL, VT, getUNDEF(DL, MVT::i64), N);
+  if (N.getValueType() == MVT::i64 && VT == MVT::i32)
+    return CurDAG->getTargetExtractSubreg(SystemZ::subreg_l32, DL, VT, N);
+  assert(N.getValueType() == VT && "Unexpected value types");
+  return N;
+}
 
-  return true;
+SDNode *SystemZDAGToDAGISel::tryRISBGZero(SDNode *N) {
+  EVT VT = N->getValueType(0);
+  RxSBGOperands RISBG(SystemZ::RISBG, SDValue(N, 0));
+  unsigned Count = 0;
+  while 
(expandRxSBG(RISBG))
+    if (RISBG.Input.getOpcode() != ISD::ANY_EXTEND)
+      Count += 1;
+  if (Count == 0)
+    return 0;
+  if (Count == 1) {
+    // Prefer to use normal shift instructions over RISBG, since they can handle
+    // all cases and are sometimes shorter.
+    if (N->getOpcode() != ISD::AND)
+      return 0;
+
+    // Prefer register extensions like LLC over RISBG.  Also prefer to start
+    // out with normal ANDs if one instruction would be enough.  We can convert
+    // these ANDs into an RISBG later if a three-address instruction is useful.
+    if (VT == MVT::i32 ||
+        RISBG.Mask == 0xff ||
+        RISBG.Mask == 0xffff ||
+        SystemZ::isImmLF(~RISBG.Mask) ||
+        SystemZ::isImmHF(~RISBG.Mask)) {
+      // Force the new mask into the DAG, since it may include known-one bits.
+      ConstantSDNode *MaskN = cast<ConstantSDNode>(N->getOperand(1).getNode());
+      if (MaskN->getZExtValue() != RISBG.Mask) {
+        SDValue NewMask = CurDAG->getConstant(RISBG.Mask, VT);
+        N = CurDAG->UpdateNodeOperands(N, N->getOperand(0), NewMask);
+        return SelectCode(N);
+      }
+      return 0;
+    }
+  }
+
+  unsigned Opcode = SystemZ::RISBG;
+  EVT OpcodeVT = MVT::i64;
+  if (VT == MVT::i32 && Subtarget.hasHighWord()) {
+    Opcode = SystemZ::RISBMux;
+    OpcodeVT = MVT::i32;
+    RISBG.Start &= 31;
+    RISBG.End &= 31;
+  }
+  SDValue Ops[5] = {
+    getUNDEF(SDLoc(N), OpcodeVT),
+    convertTo(SDLoc(N), OpcodeVT, RISBG.Input),
+    CurDAG->getTargetConstant(RISBG.Start, MVT::i32),
+    CurDAG->getTargetConstant(RISBG.End | 128, MVT::i32),
+    CurDAG->getTargetConstant(RISBG.Rotate, MVT::i32)
+  };
+  N = CurDAG->getMachineNode(Opcode, SDLoc(N), OpcodeVT, Ops);
+  return convertTo(SDLoc(N), VT, SDValue(N, 0)).getNode();
+}
+
+SDNode *SystemZDAGToDAGISel::tryRxSBG(SDNode *N, unsigned Opcode) {
+  // Try treating each operand of N as the second operand of the RxSBG
+  // and see which goes deepest.
+  RxSBGOperands RxSBG[] = {
+    RxSBGOperands(Opcode, N->getOperand(0)),
+    RxSBGOperands(Opcode, N->getOperand(1))
+  };
+  unsigned Count[] = { 0, 0 };
+  for (unsigned I = 0; I < 2; ++I)
+    while (expandRxSBG(RxSBG[I]))
+      if (RxSBG[I].Input.getOpcode() != ISD::ANY_EXTEND)
+        Count[I] += 1;
+
+  // Do nothing if neither operand is suitable.
+  if (Count[0] == 0 && Count[1] == 0)
+    return 0;
+
+  // Pick the deepest second operand.
+  unsigned I = Count[0] > Count[1] ? 0 : 1;
+  SDValue Op0 = N->getOperand(I ^ 1);
+
+  // Prefer IC for character insertions from memory.
+  if (Opcode == SystemZ::ROSBG && (RxSBG[I].Mask & 0xff) == 0)
+    if (LoadSDNode *Load = dyn_cast<LoadSDNode>(Op0.getNode()))
+      if (Load->getMemoryVT() == MVT::i8)
+        return 0;
+
+  // See whether we can avoid an AND in the first operand by converting
+  // ROSBG to RISBG.
+  if (Opcode == SystemZ::ROSBG && detectOrAndInsertion(Op0, RxSBG[I].Mask))
+    Opcode = SystemZ::RISBG;
+
+  EVT VT = N->getValueType(0);
+  SDValue Ops[5] = {
+    convertTo(SDLoc(N), MVT::i64, Op0),
+    convertTo(SDLoc(N), MVT::i64, RxSBG[I].Input),
+    CurDAG->getTargetConstant(RxSBG[I].Start, MVT::i32),
+    CurDAG->getTargetConstant(RxSBG[I].End, MVT::i32),
+    CurDAG->getTargetConstant(RxSBG[I].Rotate, MVT::i32)
+  };
+  N = CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i64, Ops);
+  return convertTo(SDLoc(N), VT, SDValue(N, 0)).getNode();
+}
+
+SDNode *SystemZDAGToDAGISel::splitLargeImmediate(unsigned Opcode, SDNode *Node,
+                                                 SDValue Op0, uint64_t UpperVal,
+                                                 uint64_t LowerVal) {
+  EVT VT = Node->getValueType(0);
+  SDLoc DL(Node);
+  SDValue Upper = CurDAG->getConstant(UpperVal, VT);
+  if (Op0.getNode())
+    Upper = CurDAG->getNode(Opcode, DL, VT, Op0, Upper);
+  Upper = SDValue(Select(Upper.getNode()), 0);
+
+  SDValue Lower = CurDAG->getConstant(LowerVal, VT);
+  SDValue Or = CurDAG->getNode(Opcode, DL, VT, Upper, Lower);
+  return Or.getNode();
 }
 
-/// SelectLAAddr - it calls SelectAddr and determines if the maximal addressing
-/// mode it matches can be cost effectively 
emitted as an LA/LAY instruction.
-bool SystemZDAGToDAGISel::SelectLAAddr(SDNode *Op, SDValue Addr,
-                SDValue &Base, SDValue &Disp, SDValue &Index) {
-  SystemZRRIAddressMode AM;
+bool SystemZDAGToDAGISel::canUseBlockOperation(StoreSDNode *Store,
+                                               LoadSDNode *Load) const {
+  // Check that the two memory operands have the same size.
+  if (Load->getMemoryVT() != Store->getMemoryVT())
+    return false;
 
-  if (MatchAddress(Addr, AM, false))
+  // Volatility stops an access from being decomposed.
+  if (Load->isVolatile() || Store->isVolatile())
     return false;
 
-  EVT VT = Addr.getValueType();
-  unsigned Complexity = 0;
-  if (AM.BaseType == SystemZRRIAddressMode::RegBase)
-    if (AM.Base.Reg.getNode())
-      Complexity = 1;
-    else
-      AM.Base.Reg = CurDAG->getRegister(0, VT);
-  else if (AM.BaseType == SystemZRRIAddressMode::FrameIndexBase)
-    Complexity = 4;
+  // There's no chance of overlap if the load is invariant.
+  if (Load->isInvariant())
+    return true;
 
-  if (AM.IndexReg.getNode())
-    Complexity += 1;
-  else
-    AM.IndexReg = CurDAG->getRegister(0, VT);
+  // Otherwise we need to check whether there's an alias.
+  const Value *V1 = Load->getSrcValue();
+  const Value *V2 = Store->getSrcValue();
+  if (!V1 || !V2)
+    return false;
 
-  if (AM.Disp && (AM.Base.Reg.getNode() || AM.IndexReg.getNode()))
-    Complexity += 1;
+  // Reject equality.
+  uint64_t Size = Load->getMemoryVT().getStoreSize();
+  int64_t End1 = Load->getSrcValueOffset() + Size;
+  int64_t End2 = Store->getSrcValueOffset() + Size;
+  if (V1 == V2 && End1 == End2)
+    return false;
 
-  if (Complexity > 2) {
-    getAddressOperands(AM, Base, Disp, Index);
-    return true;
+  return !AA->alias(AliasAnalysis::Location(V1, End1, Load->getTBAAInfo()),
+                    AliasAnalysis::Location(V2, End2, Store->getTBAAInfo()));
+}
+
+bool SystemZDAGToDAGISel::storeLoadCanUseMVC(SDNode *N) const {
+  StoreSDNode *Store = cast<StoreSDNode>(N);
+  LoadSDNode *Load = cast<LoadSDNode>(Store->getValue());
+
+  // Prefer not to use MVC if either address can use ... RELATIVE LONG
+  // instructions.
+  uint64_t Size = Load->getMemoryVT().getStoreSize();
+  if (Size > 1 && Size <= 8) {
+    // Prefer LHRL, LRL and LGRL.
+    if (SystemZISD::isPCREL(Load->getBasePtr().getOpcode()))
+      return false;
+    // Prefer STHRL, STRL and STGRL.
+    if (SystemZISD::isPCREL(Store->getBasePtr().getOpcode()))
+      return false;
   }
 
-  return false;
+  return canUseBlockOperation(Store, Load);
 }
 
-bool SystemZDAGToDAGISel::TryFoldLoad(SDNode *P, SDValue N,
-                 SDValue &Base, SDValue &Disp, SDValue &Index) {
-  if (ISD::isNON_EXTLoad(N.getNode()) &&
-      IsLegalToFold(N, P, P, OptLevel))
-    return SelectAddrRRI20(P, N.getOperand(1), Base, Disp, Index);
-  return false;
+bool SystemZDAGToDAGISel::storeLoadCanUseBlockBinary(SDNode *N,
+                                                     unsigned I) const {
+  StoreSDNode *StoreA = cast<StoreSDNode>(N);
+  LoadSDNode *LoadA = cast<LoadSDNode>(StoreA->getValue().getOperand(1 - I));
+  LoadSDNode *LoadB = cast<LoadSDNode>(StoreA->getValue().getOperand(I));
+  return !LoadA->isVolatile() && canUseBlockOperation(StoreA, LoadB);
 }
 
 SDNode *SystemZDAGToDAGISel::Select(SDNode *Node) {
-  EVT NVT = Node->getValueType(0);
-  DebugLoc dl = Node->getDebugLoc();
-  unsigned Opcode = Node->getOpcode();
-
   // Dump information about the Node being selected
   DEBUG(errs() << "Selecting: "; Node->dump(CurDAG); errs() << "\n");
 
   // If we have a custom node, we already have selected!
   if (Node->isMachineOpcode()) {
     DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
-    return NULL;   // Already selected.
+ Node->setNodeId(-1); + return 0; } + unsigned Opcode = Node->getOpcode(); + SDNode *ResNode = 0; switch (Opcode) { - default: break; - case ISD::SDIVREM: { - unsigned Opc, MOpc; - SDValue N0 = Node->getOperand(0); - SDValue N1 = Node->getOperand(1); - - EVT ResVT; - bool is32Bit = false; - switch (NVT.getSimpleVT().SimpleTy) { - default: assert(0 && "Unsupported VT!"); - case MVT::i32: - Opc = SystemZ::SDIVREM32r; MOpc = SystemZ::SDIVREM32m; - ResVT = MVT::v2i64; - is32Bit = true; - break; - case MVT::i64: - Opc = SystemZ::SDIVREM64r; MOpc = SystemZ::SDIVREM64m; - ResVT = MVT::v2i64; - break; - } - - SDValue Tmp0, Tmp1, Tmp2; - bool foldedLoad = TryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2); - - // Prepare the dividend - SDNode *Dividend; - if (is32Bit) - Dividend = CurDAG->getMachineNode(SystemZ::MOVSX64rr32, dl, MVT::i64, N0); - else - Dividend = N0.getNode(); - - // Insert prepared dividend into suitable 'subreg' - SDNode *Tmp = CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, - dl, ResVT); - Dividend = - CurDAG->getMachineNode(TargetOpcode::INSERT_SUBREG, dl, ResVT, - SDValue(Tmp, 0), SDValue(Dividend, 0), - CurDAG->getTargetConstant(SystemZ::subreg_odd, MVT::i32)); - - SDNode *Result; - SDValue DivVal = SDValue(Dividend, 0); - if (foldedLoad) { - SDValue Ops[] = { DivVal, Tmp0, Tmp1, Tmp2, N1.getOperand(0) }; - Result = CurDAG->getMachineNode(MOpc, dl, ResVT, MVT::Other, - Ops, array_lengthof(Ops)); - // Update the chain. - ReplaceUses(N1.getValue(1), SDValue(Result, 1)); - } else { - Result = CurDAG->getMachineNode(Opc, dl, ResVT, SDValue(Dividend, 0), N1); - } - - // Copy the division (odd subreg) result, if it is needed. - if (!SDValue(Node, 0).use_empty()) { - unsigned SubRegIdx = (is32Bit ? 
- SystemZ::subreg_odd32 : SystemZ::subreg_odd); - SDNode *Div = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, - dl, NVT, - SDValue(Result, 0), - CurDAG->getTargetConstant(SubRegIdx, - MVT::i32)); - - ReplaceUses(SDValue(Node, 0), SDValue(Div, 0)); - DEBUG(errs() << "=> "; Result->dump(CurDAG); errs() << "\n"); - } - - // Copy the remainder (even subreg) result, if it is needed. - if (!SDValue(Node, 1).use_empty()) { - unsigned SubRegIdx = (is32Bit ? - SystemZ::subreg_32bit : SystemZ::subreg_even); - SDNode *Rem = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, - dl, NVT, - SDValue(Result, 0), - CurDAG->getTargetConstant(SubRegIdx, - MVT::i32)); - - ReplaceUses(SDValue(Node, 1), SDValue(Rem, 0)); - DEBUG(errs() << "=> "; Result->dump(CurDAG); errs() << "\n"); - } - - return NULL; - } - case ISD::UDIVREM: { - unsigned Opc, MOpc, ClrOpc; - SDValue N0 = Node->getOperand(0); - SDValue N1 = Node->getOperand(1); - EVT ResVT; - - bool is32Bit = false; - switch (NVT.getSimpleVT().SimpleTy) { - default: assert(0 && "Unsupported VT!"); - case MVT::i32: - Opc = SystemZ::UDIVREM32r; MOpc = SystemZ::UDIVREM32m; - ClrOpc = SystemZ::MOV64Pr0_even; - ResVT = MVT::v2i32; - is32Bit = true; - break; - case MVT::i64: - Opc = SystemZ::UDIVREM64r; MOpc = SystemZ::UDIVREM64m; - ClrOpc = SystemZ::MOV128r0_even; - ResVT = MVT::v2i64; - break; - } + case ISD::OR: + if (Node->getOperand(1).getOpcode() != ISD::Constant) + ResNode = tryRxSBG(Node, SystemZ::ROSBG); + goto or_xor; + + case ISD::XOR: + if (Node->getOperand(1).getOpcode() != ISD::Constant) + ResNode = tryRxSBG(Node, SystemZ::RXSBG); + // Fall through. + or_xor: + // If this is a 64-bit operation in which both 32-bit halves are nonzero, + // split the operation into two. 
+  if (!ResNode && Node->getValueType(0) == MVT::i64)
+    if (ConstantSDNode *Op1 = dyn_cast<ConstantSDNode>(Node->getOperand(1))) {
+      uint64_t Val = Op1->getZExtValue();
+      if (!SystemZ::isImmLF(Val) && !SystemZ::isImmHF(Val))
+        Node = splitLargeImmediate(Opcode, Node, Node->getOperand(0),
+                                   Val - uint32_t(Val), uint32_t(Val));
+    }
+    break;
 
-    SDValue Tmp0, Tmp1, Tmp2;
-    bool foldedLoad = TryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2);
-
-    // Prepare the dividend
-    SDNode *Dividend = N0.getNode();
-
-    // Insert prepared dividend into suitable 'subreg'
-    SDNode *Tmp = CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
-                                         dl, ResVT);
-    {
-      unsigned SubRegIdx = (is32Bit ?
-                            SystemZ::subreg_odd32 : SystemZ::subreg_odd);
-      Dividend =
-        CurDAG->getMachineNode(TargetOpcode::INSERT_SUBREG, dl, ResVT,
-                               SDValue(Tmp, 0), SDValue(Dividend, 0),
-                               CurDAG->getTargetConstant(SubRegIdx, MVT::i32));
-    }
+  case ISD::AND:
+    if (Node->getOperand(1).getOpcode() != ISD::Constant)
+      ResNode = tryRxSBG(Node, SystemZ::RNSBG);
+    // Fall through.
+  case ISD::ROTL:
+  case ISD::SHL:
+  case ISD::SRL:
+    if (!ResNode)
+      ResNode = tryRISBGZero(Node);
+    break;
 
-    // Zero out even subreg
-    Dividend = CurDAG->getMachineNode(ClrOpc, dl, ResVT, SDValue(Dividend, 0));
-
-    SDValue DivVal = SDValue(Dividend, 0);
-    SDNode *Result;
-    if (foldedLoad) {
-      SDValue Ops[] = { DivVal, Tmp0, Tmp1, Tmp2, N1.getOperand(0) };
-      Result = CurDAG->getMachineNode(MOpc, dl, ResVT, MVT::Other,
-                                      Ops, array_lengthof(Ops));
-      // Update the chain.
-      ReplaceUses(N1.getValue(1), SDValue(Result, 1));
-    } else {
-      Result = CurDAG->getMachineNode(Opc, dl, ResVT, DivVal, N1);
+  case ISD::Constant:
+    // If this is a 64-bit constant that is out of the range of LLILF,
+    // LLIHF and LGFI, split it into two 32-bit pieces.
+    if (Node->getValueType(0) == MVT::i64) {
+      uint64_t Val = cast<ConstantSDNode>(Node)->getZExtValue();
+      if (!SystemZ::isImmLF(Val) && !SystemZ::isImmHF(Val) && !isInt<32>(Val))
+        Node = splitLargeImmediate(ISD::OR, Node, SDValue(),
+                                   Val - uint32_t(Val), uint32_t(Val));
     }
+    break;
 
-    // Copy the division (odd subreg) result, if it is needed.
-    if (!SDValue(Node, 0).use_empty()) {
-      unsigned SubRegIdx = (is32Bit ?
-                            SystemZ::subreg_odd32 : SystemZ::subreg_odd);
-      SDNode *Div = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
-                                           dl, NVT,
-                                           SDValue(Result, 0),
-                                           CurDAG->getTargetConstant(SubRegIdx,
-                                                                     MVT::i32));
-      ReplaceUses(SDValue(Node, 0), SDValue(Div, 0));
-      DEBUG(errs() << "=> "; Result->dump(CurDAG); errs() << "\n");
+  case ISD::ATOMIC_LOAD_SUB:
+    // Try to convert subtractions of constants to additions.
+    if (ConstantSDNode *Op2 = dyn_cast<ConstantSDNode>(Node->getOperand(2))) {
+      uint64_t Value = -Op2->getZExtValue();
+      EVT VT = Node->getValueType(0);
+      if (VT == MVT::i32 || isInt<32>(Value)) {
+        SDValue Ops[] = { Node->getOperand(0), Node->getOperand(1),
+                          CurDAG->getConstant(int32_t(Value), VT) };
+        Node = CurDAG->MorphNodeTo(Node, ISD::ATOMIC_LOAD_ADD,
+                                   Node->getVTList(), Ops, array_lengthof(Ops));
+      }
     }
+    break;
 
-    // Copy the remainder (even subreg) result, if it is needed.
-    if (!SDValue(Node, 1).use_empty()) {
-      unsigned SubRegIdx = (is32Bit ?
-                            SystemZ::subreg_32bit : SystemZ::subreg_even);
-      SDNode *Rem = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
-                                           dl, NVT,
-                                           SDValue(Result, 0),
-                                           CurDAG->getTargetConstant(SubRegIdx,
-                                                                     MVT::i32));
-      ReplaceUses(SDValue(Node, 1), SDValue(Rem, 0));
-      DEBUG(errs() << "=> "; Result->dump(CurDAG); errs() << "\n");
+  case SystemZISD::SELECT_CCMASK: {
+    SDValue Op0 = Node->getOperand(0);
+    SDValue Op1 = Node->getOperand(1);
+    // Prefer to put any load first, so that it can be matched as a
+    // conditional load.
+    if (Op1.getOpcode() == ISD::LOAD && Op0.getOpcode() != ISD::LOAD) {
+      SDValue CCValid = Node->getOperand(2);
+      SDValue CCMask = Node->getOperand(3);
+      uint64_t ConstCCValid =
+        cast<ConstantSDNode>(CCValid.getNode())->getZExtValue();
+      uint64_t ConstCCMask =
+        cast<ConstantSDNode>(CCMask.getNode())->getZExtValue();
+      // Invert the condition.
+      CCMask = CurDAG->getConstant(ConstCCValid ^ ConstCCMask,
+                                   CCMask.getValueType());
+      SDValue Op4 = Node->getOperand(4);
+      Node = CurDAG->UpdateNodeOperands(Node, Op1, Op0, CCValid, CCMask, Op4);
     }
-
-    return NULL;
+    break;
   }
   }
 
   // Select the default instruction
-  SDNode *ResNode = SelectCode(Node);
+  if (!ResNode)
+    ResNode = SelectCode(Node);
 
   DEBUG(errs() << "=> ";
         if (ResNode == NULL || ResNode == Node)
@@ -778,3 +1139,21 @@ SDNode *SystemZDAGToDAGISel::Select(SDNode *Node) {
         );
   return ResNode;
 }
+
+bool SystemZDAGToDAGISel::
+SelectInlineAsmMemoryOperand(const SDValue &Op,
+                             char ConstraintCode,
+                             std::vector<SDValue> &OutOps) {
+  assert(ConstraintCode == 'm' && "Unexpected constraint code");
+  // Accept addresses with short displacements, which are compatible
+  // with Q, R, S and T.  But keep the index operand for future expansion.
+  SDValue Base, Disp, Index;
+  if (!selectBDXAddr(SystemZAddressingMode::FormBD,
+                     SystemZAddressingMode::Disp12Only,
+                     Op, Base, Disp, Index))
+    return true;
+  OutOps.push_back(Base);
+  OutOps.push_back(Disp);
+  OutOps.push_back(Index);
+  return false;
+}