X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FTarget%2FTargetSelectionDAG.td;h=38a604653f5d56e4c603476494bf32e6cfdb02a6;hb=4a307ecce68f90e0eebf1ded52b947816cdc2304;hp=2560d86f52df8c7206ee047c8be095c5c17f11ad;hpb=ddf89566a93081cb230bb9406a72ab2d3eada4a7;p=oota-llvm.git

diff --git a/lib/Target/TargetSelectionDAG.td b/lib/Target/TargetSelectionDAG.td
index 2560d86f52d..38a604653f5 100644
--- a/lib/Target/TargetSelectionDAG.td
+++ b/lib/Target/TargetSelectionDAG.td
@@ -53,13 +53,20 @@ class SDTCisOpSmallerThanOp<int SmallOp, int BigOp> : SDTypeConstraint<SmallOp>{
 
 /// SDTCisIntVectorOfSameSize - This indicates that ThisOp and OtherOp are
 /// vector types, and that ThisOp is the result of
-/// MVT::getIntVectorWithNumElements with the number of elements that ThisOp
-/// has.
+/// MVT::getIntVectorWithNumElements with the number of elements
+/// that ThisOp has.
 class SDTCisIntVectorOfSameSize<int ThisOp, int OtherOp>
   : SDTypeConstraint<ThisOp> {
   int OtherOpNum = OtherOp;
 }
 
+/// SDTCisEltOfVec - This indicates that ThisOp is a scalar type of the same
+/// type as the element type of OtherOp, which is a vector type.
+class SDTCisEltOfVec<int ThisOp, int OtherOp>
+  : SDTypeConstraint<ThisOp> {
+  int OtherOpNum = OtherOp;
+}
+
 //===----------------------------------------------------------------------===//
 // Selection DAG Type Profile definitions.
 //
@@ -171,6 +178,27 @@ def SDTIStore : SDTypeProfile<1, 3, [  // indexed store
 def SDTVecShuffle : SDTypeProfile<1, 3, [
   SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>, SDTCisIntVectorOfSameSize<3, 0>
 ]>;
+def SDTVecExtract : SDTypeProfile<1, 2, [  // vector extract
+  SDTCisEltOfVec<0, 1>, SDTCisPtrTy<2>
+]>;
+def SDTVecInsert : SDTypeProfile<1, 3, [   // vector insert
+  SDTCisEltOfVec<2, 1>, SDTCisSameAs<0, 1>, SDTCisPtrTy<3>
+]>;
+
+def STDPrefetch : SDTypeProfile<0, 3, [    // prefetch
+  SDTCisPtrTy<0>, SDTCisSameAs<1, 2>, SDTCisInt<1>
+]>;
+
+def STDMemBarrier : SDTypeProfile<0, 5, [  // memory barier
+  SDTCisSameAs<0,1>, SDTCisSameAs<0,2>, SDTCisSameAs<0,3>, SDTCisSameAs<0,4>,
+  SDTCisInt<0>
+]>;
+def STDAtomic3 : SDTypeProfile<1, 3, [
+  SDTCisSameAs<0,2>, SDTCisSameAs<0,3>, SDTCisInt<0>, SDTCisPtrTy<1>
+]>;
+def STDAtomic2 : SDTypeProfile<1, 2, [
+  SDTCisSameAs<0,2>, SDTCisInt<0>, SDTCisPtrTy<1>
+]>;
 
 class SDCallSeqStart<list<SDTypeConstraint> constraints> :
         SDTypeProfile<0, 1, constraints>;
@@ -211,8 +239,7 @@ def node;
 def srcvalue;
 
 def imm        : SDNode<"ISD::Constant"  , SDTIntLeaf , [], "ConstantSDNode">;
-def fpimm      : SDNode<"ISD::TargetConstantFP",
-                        SDTFPLeaf, [], "ConstantFPSDNode">;
+def fpimm      : SDNode<"ISD::ConstantFP", SDTFPLeaf , [], "ConstantFPSDNode">;
 def vt         : SDNode<"ISD::VALUETYPE" , SDTOther   , [], "VTSDNode">;
 def bb         : SDNode<"ISD::BasicBlock", SDTOther   , [], "BasicBlockSDNode">;
 def cond       : SDNode<"ISD::CONDCODE"  , SDTOther   , [], "CondCodeSDNode">;
@@ -283,6 +310,9 @@ def zext       : SDNode<"ISD::ZERO_EXTEND", SDTIntExtendOp>;
 def anyext     : SDNode<"ISD::ANY_EXTEND" , SDTIntExtendOp>;
 def trunc      : SDNode<"ISD::TRUNCATE"   , SDTIntTruncOp>;
 def bitconvert : SDNode<"ISD::BIT_CONVERT", SDTUnaryOp>;
+def extractelt : SDNode<"ISD::EXTRACT_VECTOR_ELT", SDTVecExtract>;
+def insertelt  : SDNode<"ISD::INSERT_VECTOR_ELT", SDTVecInsert>;
+
 
 def fadd       : SDNode<"ISD::FADD"       , SDTFPBinOp, [SDNPCommutative]>;
 def fsub       : SDNode<"ISD::FSUB"       , SDTFPBinOp>;
@@ -307,6 +337,7 @@ def fp_to_uint : SDNode<"ISD::FP_TO_UINT" , SDTFPToIntOp>;
 def setcc      : SDNode<"ISD::SETCC"      , SDTSetCC>;
 def select     : SDNode<"ISD::SELECT"     , SDTSelect>;
 def selectcc   : SDNode<"ISD::SELECT_CC"  , SDTSelectCC>;
+def vsetcc     : SDNode<"ISD::VSETCC"     , SDTSetCC>;
 def brcond     : SDNode<"ISD::BRCOND"     , SDTBrcond,
                         [SDNPHasChain]>;
 def brind      : SDNode<"ISD::BRIND"      , SDTBrind,  [SDNPHasChain]>;
@@ -315,6 +346,38 @@ def ret        : SDNode<"ISD::RET"        , SDTNone, [SDNPHasChain]>;
 def trap       : SDNode<"ISD::TRAP"       , SDTNone,
                         [SDNPHasChain, SDNPSideEffect]>;
 
+def prefetch   : SDNode<"ISD::PREFETCH"   , STDPrefetch,
+                        [SDNPHasChain, SDNPMayLoad, SDNPMayStore]>;
+
+def membarrier : SDNode<"ISD::MEMBARRIER" , STDMemBarrier,
+                        [SDNPHasChain, SDNPSideEffect]>;
+
+// Do not use atomic_* directly, use atomic_*_size (see below)
+def atomic_lcs : SDNode<"ISD::ATOMIC_LCS" , STDAtomic3,
+                        [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
+def atomic_las : SDNode<"ISD::ATOMIC_LAS" , STDAtomic2,
+                        [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
+def atomic_swap : SDNode<"ISD::ATOMIC_SWAP", STDAtomic2,
+                        [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
+def atomic_lss : SDNode<"ISD::ATOMIC_LSS" , STDAtomic2,
+                        [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
+def atomic_load_and : SDNode<"ISD::ATOMIC_LOAD_AND" , STDAtomic2,
+                        [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
+def atomic_load_or  : SDNode<"ISD::ATOMIC_LOAD_OR" , STDAtomic2,
+                        [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
+def atomic_load_xor : SDNode<"ISD::ATOMIC_LOAD_XOR" , STDAtomic2,
+                        [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
+def atomic_load_nand: SDNode<"ISD::ATOMIC_LOAD_NAND", STDAtomic2,
+                        [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
+def atomic_load_min : SDNode<"ISD::ATOMIC_LOAD_MIN", STDAtomic2,
+                        [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
+def atomic_load_max : SDNode<"ISD::ATOMIC_LOAD_MAX", STDAtomic2,
+                        [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
+def atomic_load_umin : SDNode<"ISD::ATOMIC_LOAD_UMIN", STDAtomic2,
+                        [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
+def atomic_load_umax : SDNode<"ISD::ATOMIC_LOAD_UMAX", STDAtomic2,
+                        [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
+
 // Do not use ld, st directly. Use load, extload, sextload, zextload, store,
 // and truncst (see below).
 def ld         : SDNode<"ISD::LOAD"       , SDTLoad,
@@ -406,8 +469,8 @@ class PatLeaf<dag frag, code pred = [{}], SDNodeXForm xform = NOOP_SDNodeXForm>
 
 // Leaf fragments.
 
-def vtInt      : PatLeaf<(vt),  [{ return MVT::isInteger(N->getVT()); }]>;
-def vtFP       : PatLeaf<(vt),  [{ return MVT::isFloatingPoint(N->getVT()); }]>;
+def vtInt      : PatLeaf<(vt),  [{ return N->getVT().isInteger(); }]>;
+def vtFP       : PatLeaf<(vt),  [{ return N->getVT().isFloatingPoint(); }]>;
 
 def immAllOnes : PatLeaf<(imm), [{ return N->isAllOnesValue(); }]>;
 def immAllOnesV: PatLeaf<(build_vector), [{
@@ -444,42 +507,42 @@ def extloadi1  : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
     return LD->getExtensionType() == ISD::EXTLOAD &&
            LD->getAddressingMode() == ISD::UNINDEXED &&
-           LD->getLoadedVT() == MVT::i1;
+           LD->getMemoryVT() == MVT::i1;
   return false;
 }]>;
 def extloadi8  : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
     return LD->getExtensionType() == ISD::EXTLOAD &&
            LD->getAddressingMode() == ISD::UNINDEXED &&
-           LD->getLoadedVT() == MVT::i8;
+           LD->getMemoryVT() == MVT::i8;
   return false;
 }]>;
 def extloadi16 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
     return LD->getExtensionType() == ISD::EXTLOAD &&
            LD->getAddressingMode() == ISD::UNINDEXED &&
-           LD->getLoadedVT() == MVT::i16;
+           LD->getMemoryVT() == MVT::i16;
   return false;
 }]>;
 def extloadi32 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
     return LD->getExtensionType() == ISD::EXTLOAD &&
            LD->getAddressingMode() == ISD::UNINDEXED &&
-           LD->getLoadedVT() == MVT::i32;
+           LD->getMemoryVT() == MVT::i32;
   return false;
 }]>;
 def extloadf32 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
     return LD->getExtensionType() == ISD::EXTLOAD &&
            LD->getAddressingMode() == ISD::UNINDEXED &&
-           LD->getLoadedVT() == MVT::f32;
+           LD->getMemoryVT() == MVT::f32;
   return false;
 }]>;
 def extloadf64 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
     return LD->getExtensionType() == ISD::EXTLOAD &&
            LD->getAddressingMode() == ISD::UNINDEXED &&
-           LD->getLoadedVT() == MVT::f64;
+           LD->getMemoryVT() == MVT::f64;
   return false;
 }]>;
 
@@ -487,28 +550,28 @@ def sextloadi1 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
     return LD->getExtensionType() == ISD::SEXTLOAD &&
            LD->getAddressingMode() == ISD::UNINDEXED &&
-           LD->getLoadedVT() == MVT::i1;
+           LD->getMemoryVT() == MVT::i1;
   return false;
 }]>;
 def sextloadi8 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
     return LD->getExtensionType() == ISD::SEXTLOAD &&
            LD->getAddressingMode() == ISD::UNINDEXED &&
-           LD->getLoadedVT() == MVT::i8;
+           LD->getMemoryVT() == MVT::i8;
   return false;
 }]>;
 def sextloadi16 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
     return LD->getExtensionType() == ISD::SEXTLOAD &&
            LD->getAddressingMode() == ISD::UNINDEXED &&
-           LD->getLoadedVT() == MVT::i16;
+           LD->getMemoryVT() == MVT::i16;
   return false;
 }]>;
 def sextloadi32 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
     return LD->getExtensionType() == ISD::SEXTLOAD &&
            LD->getAddressingMode() == ISD::UNINDEXED &&
-           LD->getLoadedVT() == MVT::i32;
+           LD->getMemoryVT() == MVT::i32;
   return false;
 }]>;
 
@@ -516,28 +579,28 @@ def zextloadi1 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
     return LD->getExtensionType() == ISD::ZEXTLOAD &&
            LD->getAddressingMode() == ISD::UNINDEXED &&
-           LD->getLoadedVT() == MVT::i1;
+           LD->getMemoryVT() == MVT::i1;
   return false;
 }]>;
 def zextloadi8 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
     return LD->getExtensionType() == ISD::ZEXTLOAD &&
            LD->getAddressingMode() == ISD::UNINDEXED &&
-           LD->getLoadedVT() == MVT::i8;
+           LD->getMemoryVT() == MVT::i8;
   return false;
 }]>;
 def zextloadi16 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
     return LD->getExtensionType() == ISD::ZEXTLOAD &&
            LD->getAddressingMode() == ISD::UNINDEXED &&
-           LD->getLoadedVT() == MVT::i16;
+           LD->getMemoryVT() == MVT::i16;
   return false;
 }]>;
 def zextloadi32 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
     return LD->getExtensionType() == ISD::ZEXTLOAD &&
            LD->getAddressingMode() == ISD::UNINDEXED &&
-           LD->getLoadedVT() == MVT::i32;
+           LD->getMemoryVT() == MVT::i32;
   return false;
 }]>;
 
@@ -554,35 +617,35 @@ def store : PatFrag<(ops node:$val, node:$ptr),
 def truncstorei8 : PatFrag<(ops node:$val, node:$ptr),
                            (st node:$val, node:$ptr), [{
   if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
-    return ST->isTruncatingStore() && ST->getStoredVT() == MVT::i8 &&
+    return ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i8 &&
            ST->getAddressingMode() == ISD::UNINDEXED;
   return false;
 }]>;
 def truncstorei16 : PatFrag<(ops node:$val, node:$ptr),
                             (st node:$val, node:$ptr), [{
   if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
-    return ST->isTruncatingStore() && ST->getStoredVT() == MVT::i16 &&
+    return ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i16 &&
            ST->getAddressingMode() == ISD::UNINDEXED;
   return false;
 }]>;
 def truncstorei32 : PatFrag<(ops node:$val, node:$ptr),
                             (st node:$val, node:$ptr), [{
   if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
-    return ST->isTruncatingStore() && ST->getStoredVT() == MVT::i32 &&
+    return ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i32 &&
            ST->getAddressingMode() == ISD::UNINDEXED;
   return false;
 }]>;
 def truncstoref32 : PatFrag<(ops node:$val, node:$ptr),
                             (st node:$val, node:$ptr), [{
   if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
-    return ST->isTruncatingStore() && ST->getStoredVT() == MVT::f32 &&
+    return ST->isTruncatingStore() && ST->getMemoryVT() == MVT::f32 &&
            ST->getAddressingMode() == ISD::UNINDEXED;
   return false;
 }]>;
 def truncstoref64 : PatFrag<(ops node:$val, node:$ptr),
                             (st node:$val, node:$ptr), [{
   if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
-    return ST->isTruncatingStore() && ST->getStoredVT() == MVT::f64 &&
+    return ST->isTruncatingStore() && ST->getMemoryVT() == MVT::f64 &&
            ST->getAddressingMode() == ISD::UNINDEXED;
   return false;
 }]>;
@@ -603,7 +666,7 @@ def pre_truncsti1 : PatFrag<(ops node:$val, node:$base, node:$offset),
   if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
     ISD::MemIndexedMode AM = ST->getAddressingMode();
     return (AM == ISD::PRE_INC || AM == ISD::PRE_DEC) &&
-           ST->isTruncatingStore() && ST->getStoredVT() == MVT::i1;
+           ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i1;
   }
   return false;
 }]>;
@@ -612,7 +675,7 @@ def pre_truncsti8 : PatFrag<(ops node:$val, node:$base, node:$offset),
   if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
     ISD::MemIndexedMode AM = ST->getAddressingMode();
     return (AM == ISD::PRE_INC || AM == ISD::PRE_DEC) &&
-           ST->isTruncatingStore() && ST->getStoredVT() == MVT::i8;
+           ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i8;
   }
   return false;
 }]>;
@@ -621,7 +684,7 @@ def pre_truncsti16 : PatFrag<(ops node:$val, node:$base, node:$offset),
   if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
     ISD::MemIndexedMode AM = ST->getAddressingMode();
     return (AM == ISD::PRE_INC || AM == ISD::PRE_DEC) &&
-           ST->isTruncatingStore() && ST->getStoredVT() == MVT::i16;
+           ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i16;
   }
   return false;
 }]>;
@@ -630,7 +693,7 @@ def pre_truncsti32 : PatFrag<(ops node:$val, node:$base, node:$offset),
   if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
     ISD::MemIndexedMode AM = ST->getAddressingMode();
     return (AM == ISD::PRE_INC || AM == ISD::PRE_DEC) &&
-           ST->isTruncatingStore() && ST->getStoredVT() == MVT::i32;
+           ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i32;
   }
   return false;
 }]>;
@@ -639,7 +702,7 @@ def pre_truncstf32 : PatFrag<(ops node:$val, node:$base, node:$offset),
   if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
     ISD::MemIndexedMode AM = ST->getAddressingMode();
     return (AM == ISD::PRE_INC || AM == ISD::PRE_DEC) &&
-           ST->isTruncatingStore() && ST->getStoredVT() == MVT::f32;
+           ST->isTruncatingStore() && ST->getMemoryVT() == MVT::f32;
   }
   return false;
 }]>;
@@ -659,7 +722,7 @@ def post_truncsti1 : PatFrag<(ops node:$val, node:$base, node:$offset),
   if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
     ISD::MemIndexedMode AM = ST->getAddressingMode();
     return (AM == ISD::POST_INC || AM == ISD::POST_DEC) &&
-           ST->isTruncatingStore() && ST->getStoredVT() == MVT::i1;
+           ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i1;
   }
   return false;
 }]>;
@@ -668,7 +731,7 @@ def post_truncsti8 : PatFrag<(ops node:$val, node:$base, node:$offset),
   if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
     ISD::MemIndexedMode AM = ST->getAddressingMode();
     return (AM == ISD::POST_INC || AM == ISD::POST_DEC) &&
-           ST->isTruncatingStore() && ST->getStoredVT() == MVT::i8;
+           ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i8;
   }
   return false;
 }]>;
@@ -677,7 +740,7 @@ def post_truncsti16 : PatFrag<(ops node:$val, node:$base, node:$offset),
   if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
     ISD::MemIndexedMode AM = ST->getAddressingMode();
     return (AM == ISD::POST_INC || AM == ISD::POST_DEC) &&
-           ST->isTruncatingStore() && ST->getStoredVT() == MVT::i16;
+           ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i16;
   }
   return false;
 }]>;
@@ -686,7 +749,7 @@ def post_truncsti32 : PatFrag<(ops node:$val, node:$base, node:$offset),
   if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
     ISD::MemIndexedMode AM = ST->getAddressingMode();
     return (AM == ISD::POST_INC || AM == ISD::POST_DEC) &&
-           ST->isTruncatingStore() && ST->getStoredVT() == MVT::i32;
+           ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i32;
   }
   return false;
 }]>;
@@ -695,11 +758,89 @@ def post_truncstf32 : PatFrag<(ops node:$val, node:$base, node:$offset),
   if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
     ISD::MemIndexedMode AM = ST->getAddressingMode();
     return (AM == ISD::POST_INC || AM == ISD::POST_DEC) &&
-           ST->isTruncatingStore() && ST->getStoredVT() == MVT::f32;
+           ST->isTruncatingStore() && ST->getMemoryVT() == MVT::f32;
   }
   return false;
 }]>;
 
+//Atomic patterns
+def atomic_lcs_8 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp),
+                    (atomic_lcs node:$ptr, node:$cmp, node:$swp), [{
+  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
+    return V->getVT() == MVT::i8;
+  return false;
+}]>;
+def atomic_lcs_16 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp),
+                    (atomic_lcs node:$ptr, node:$cmp, node:$swp), [{
+  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
+    return V->getVT() == MVT::i16;
+  return false;
+}]>;
+def atomic_lcs_32 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp),
+                    (atomic_lcs node:$ptr, node:$cmp, node:$swp), [{
+  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
+    return V->getVT() == MVT::i32;
+  return false;
+}]>;
+def atomic_lcs_64 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp),
+                    (atomic_lcs node:$ptr, node:$cmp, node:$swp), [{
+  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
+    return V->getVT() == MVT::i64;
+  return false;
+}]>;
+
+def atomic_las_8 : PatFrag<(ops node:$ptr, node:$inc),
+                    (atomic_las node:$ptr, node:$inc), [{
+  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
+    return V->getVT() == MVT::i8;
+  return false;
+}]>;
+def atomic_las_16 : PatFrag<(ops node:$ptr, node:$inc),
+                    (atomic_las node:$ptr, node:$inc), [{
+  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
+    return V->getVT() == MVT::i16;
+  return false;
+}]>;
+def atomic_las_32 : PatFrag<(ops node:$ptr, node:$inc),
+                    (atomic_las node:$ptr, node:$inc), [{
+  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
+    return V->getVT() == MVT::i32;
+  return false;
+}]>;
+def atomic_las_64 : PatFrag<(ops node:$ptr, node:$inc),
+                    (atomic_las node:$ptr, node:$inc), [{
+  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
+    return V->getVT() == MVT::i64;
+  return false;
+}]>;
+
+def atomic_swap_8 : PatFrag<(ops node:$ptr, node:$inc),
+                    (atomic_swap node:$ptr, node:$inc), [{
+  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
+    return V->getVT() == MVT::i8;
+  return false;
+}]>;
+def atomic_swap_16 : PatFrag<(ops node:$ptr, node:$inc),
+                    (atomic_swap node:$ptr, node:$inc), [{
+  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
+    return V->getVT() == MVT::i16;
+  return false;
+}]>;
+def atomic_swap_32 : PatFrag<(ops node:$ptr, node:$inc),
+                    (atomic_swap node:$ptr, node:$inc), [{
+  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
+    return V->getVT() == MVT::i32;
+  return false;
+}]>;
+def atomic_swap_64 : PatFrag<(ops node:$ptr, node:$inc),
+                    (atomic_swap node:$ptr, node:$inc), [{
+  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
+    return V->getVT() == MVT::i64;
+  return false;
+}]>;
+
+
+
 // setcc convenience fragments.
 def setoeq : PatFrag<(ops node:$lhs, node:$rhs),
                      (setcc node:$lhs, node:$rhs, SETOEQ)>;
@@ -767,6 +908,12 @@ class Pat<dag pattern, dag result> : Pattern<pattern, [result]>;
 //===----------------------------------------------------------------------===//
 // Complex pattern definitions.
 //
+
+class CPAttribute;
+// Pass the parent Operand as root to CP function rather
+// than the root of the sub-DAG
+def CPAttrParentAsRoot : CPAttribute;
+
 // Complex patterns, e.g. X86 addressing mode, requires pattern matching code
 // in C++. NumOperands is the number of operands returned by the select function;
 // SelectFunc is the name of the function used to pattern match the max. pattern;
@@ -774,12 +921,14 @@ class Pat<dag pattern, dag result> : Pattern<pattern, [result]>;
 // e.g. X86 addressing mode - def addr : ComplexPattern<4, "SelectAddr", [add]>;
 //
 class ComplexPattern<ValueType ty, int numops, string fn,
-                     list<SDNode> roots = [], list<SDNodeProperty> props = []> {
+                     list<SDNode> roots = [], list<SDNodeProperty> props = [],
+                     list<CPAttribute> attrs = []> {
   ValueType Ty = ty;
   int NumOperands = numops;
   string SelectFunc = fn;
   list<SDNode> RootNodes = roots;
   list<SDNodeProperty> Properties = props;
+  list<CPAttribute> Attributes = attrs;
 }
 
 //===----------------------------------------------------------------------===//
@@ -788,6 +937,3 @@ class ComplexPattern<ValueType ty, int numops, string fn,
 def SDT_dwarf_loc : SDTypeProfile<0, 3,
                       [SDTCisInt<0>, SDTCisInt<1>, SDTCisInt<2>]>;
 def dwarf_loc : SDNode<"ISD::DEBUG_LOC", SDT_dwarf_loc,[SDNPHasChain]>;
-
-
-