diff --git a/lib/Target/PowerPC/PPCISelLowering.h b/lib/Target/PowerPC/PPCISelLowering.h
index a2a824106b1..c93de430fd0 100644
--- a/lib/Target/PowerPC/PPCISelLowering.h
+++ b/lib/Target/PowerPC/PPCISelLowering.h
@@ -24,7 +24,7 @@
 
 namespace llvm {
   namespace PPCISD {
-    enum NodeType {
+    enum NodeType : unsigned {
       // Start the numbering where the builtin ops and target ops leave off.
       FIRST_NUMBER = ISD::BUILTIN_OP_END,
 
@@ -71,8 +71,6 @@ namespace llvm {
      /// though these are usually folded into other nodes.
      Hi, Lo,
 
-      TOC_ENTRY,
-
      /// The following two target-specific nodes are used for calls through
      /// function pointers in the 64-bit SVR4 ABI.
 
@@ -121,6 +119,15 @@
       /// resultant GPR. Bits corresponding to other CR regs are undefined.
       MFOCRF,
 
+      /// Direct move from a VSX register to a GPR
+      MFVSR,
+
+      /// Direct move from a GPR to a VSX register (algebraic)
+      MTVSRA,
+
+      /// Direct move from a GPR to a VSX register (zero)
+      MTVSRZ,
+
       // FIXME: Remove these once the ANDI glue bug is fixed:
       /// i1 = ANDIo_1_[EQ|GT]_BIT(i32 or i64 x) - Represents the result of the
       /// eq or gt bit of CR0 after executing andi. x, 1. This is used to
@@ -168,14 +175,6 @@
       /// F8RC = MFFS - This moves the FPSCR (not modeled) into the register.
       MFFS,
 
-      /// LARX = This corresponds to PPC l{w|d}arx instrcution: load and
-      /// reserve indexed. This is used to implement atomic operations.
-      LARX,
-
-      /// STCX = This corresponds to PPC stcx. instrcution: store conditional
-      /// indexed. This is used to implement atomic operations.
-      STCX,
-
       /// TC_RETURN - A tail call return.
       ///   operand #0 chain
       ///   operand #1 callee (register or absolute)
@@ -276,6 +275,16 @@
       /// operand identifies the operating system entry point.
       SC,
 
+      /// CHAIN = CLRBHRB CHAIN - Clear branch history rolling buffer.
+      CLRBHRB,
+
+      /// GPRC, CHAIN = MFBHRBE CHAIN, Entry, Dummy - Move from branch
+      /// history rolling buffer entry.
+      MFBHRBE,
+
+      /// CHAIN = RFEBB CHAIN, State - Return from event-based branch.
+      RFEBB,
+
       /// VSRC, CHAIN = XXSWAPD CHAIN, VSRC - Occurs only for little
       /// endian. Maps to an xxswapd instruction that corrects an lxvd2x
       /// or stxvd2x instruction. The chain is necessary because the
@@ -337,7 +346,12 @@
 
       /// QBRC, CHAIN = QVLFSb CHAIN, Ptr
       /// The 4xf32 load used for v4i1 constants.
-      QVLFSb
+      QVLFSb,
+
+      /// GPRC = TOC_ENTRY GA, TOC
+      /// Loads the entry for GA from the TOC, where the TOC base is given by
+      /// the last operand.
+      TOC_ENTRY
     };
   }
 
@@ -353,6 +367,11 @@
     bool isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG);
 
+    /// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
+    /// VPKUDUM instruction.
+    bool isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
+                              SelectionDAG &DAG);
+
     /// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
     /// a VRGL* instruction with the specified unit size (1,2 or 4 bytes).
     bool isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
@@ -373,10 +392,6 @@
     /// VSPLTB/VSPLTH/VSPLTW.
     bool isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize);
 
-    /// isAllNegativeZeroVector - Returns true if all elements of build_vector
-    /// are -0.0.
-    bool isAllNegativeZeroVector(SDNode *N);
-
     /// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
     /// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
     unsigned getVSPLTImmediate(SDNode *N, unsigned EltSize, SelectionDAG &DAG);
@@ -486,7 +501,8 @@
     EmitInstrWithCustomInserter(MachineInstr *MI,
                                 MachineBasicBlock *MBB) const override;
     MachineBasicBlock *EmitAtomicBinary(MachineInstr *MI,
-                                        MachineBasicBlock *MBB, bool is64Bit,
+                                        MachineBasicBlock *MBB,
+                                        unsigned AtomicSize,
                                         unsigned BinOpcode) const;
     MachineBasicBlock *EmitPartwordAtomicBinary(MachineInstr *MI,
                                                 MachineBasicBlock *MBB,
@@ -506,9 +522,10 @@
     ConstraintWeight getSingleConstraintMatchWeight(
       AsmOperandInfo &info, const char *constraint) const override;
 
-    std::pair<unsigned, const TargetRegisterClass *>
-      getRegForInlineAsmConstraint(const std::string &Constraint,
-                                   MVT VT) const override;
+    std::pair<unsigned, const TargetRegisterClass *>
+    getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
+                                 const std::string &Constraint,
+                                 MVT VT) const override;
 
     /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
     /// function arguments in the caller parameter area. This is the actual
@@ -522,6 +539,21 @@
                                       std::vector<SDValue> &Ops,
                                       SelectionDAG &DAG) const override;
 
+    unsigned getInlineAsmMemConstraint(
+        const std::string &ConstraintCode) const override {
+      if (ConstraintCode == "es")
+        return InlineAsm::Constraint_es;
+      else if (ConstraintCode == "o")
+        return InlineAsm::Constraint_o;
+      else if (ConstraintCode == "Q")
+        return InlineAsm::Constraint_Q;
+      else if (ConstraintCode == "Z")
+        return InlineAsm::Constraint_Z;
+      else if (ConstraintCode == "Zy")
+        return InlineAsm::Constraint_Zy;
+      return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
+    }
+
     /// isLegalAddressingMode - Return true if the addressing mode represented
     /// by AM is legal for this target, for a load/store of the specified type.
     bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const override;
@@ -637,6 +669,10 @@
     void LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
                                 SelectionDAG &DAG, SDLoc dl) const;
+    SDValue LowerFP_TO_INTDirectMove(SDValue Op, SelectionDAG &DAG,
+                                     SDLoc dl) const;
+    SDValue LowerINT_TO_FPDirectMove(SDValue Op, SelectionDAG &DAG,
+                                     SDLoc dl) const;
 
     SDValue getFramePointerFrameIndex(SelectionDAG & DAG) const;
     SDValue getReturnAddrFrameIndex(SelectionDAG & DAG) const;
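
Editor's note on the TOC_ENTRY hunk above: the node moves to the end of the
enum and now carries the TOC base as an explicit last operand, per the new
"GPRC = TOC_ENTRY GA, TOC" comment. Below is a minimal sketch of how a
lowering routine might build the two-operand form, assuming it lives in
PPCISelLowering.cpp with the usual includes in scope and that X2 holds the
TOC pointer under the 64-bit ELF ABI; the helper name lowerTOCEntrySketch is
hypothetical, and this is an illustration, not the committed lowering code.

// Sketch only: construct the two-operand TOC_ENTRY node described in the
// enum comment. Assumes the standard SelectionDAG lowering context.
static SDValue lowerTOCEntrySketch(GlobalAddressSDNode *GA, SelectionDAG &DAG,
                                   SDLoc dl) {
  EVT PtrVT = MVT::i64;
  // Wrap the global in a target node so instruction selection can fold it.
  SDValue Sym = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, PtrVT,
                                           GA->getOffset());
  // X2 is the TOC base in the 64-bit ELF ABI (assumption stated above); it
  // becomes the node's last operand, matching "GPRC = TOC_ENTRY GA, TOC".
  SDValue TOCBase = DAG.getRegister(PPC::X2, PtrVT);
  return DAG.getNode(PPCISD::TOC_ENTRY, dl, PtrVT, Sym, TOCBase);
}

Making the TOC base an explicit operand (rather than an implicit register
use) lets the DAG track the dependency, which matters once medium/large code
model sequences materialize the address in more than one step.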