diff --git a/lib/Target/TargetSelectionDAG.td b/lib/Target/TargetSelectionDAG.td
index 2fbe4767708..38a604653f5 100644
--- a/lib/Target/TargetSelectionDAG.td
+++ b/lib/Target/TargetSelectionDAG.td
@@ -2,8 +2,8 @@
 //
 //                     The LLVM Compiler Infrastructure
 //
-// This file was developed by the LLVM research group and is distributed under
-// the University of Illinois Open Source License. See LICENSE.TXT for details.
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
 //
 //===----------------------------------------------------------------------===//
 //
@@ -52,14 +52,21 @@ class SDTCisOpSmallerThanOp<int SmallOp, int BigOp> : SDTypeConstraint<SmallOp>{
 }
 
 /// SDTCisIntVectorOfSameSize - This indicates that ThisOp and OtherOp are
-/// packed vector types, and that ThisOp is the result of
-/// MVT::getIntVectorWithNumElements with the number of elements that ThisOp
-/// has.
+/// vector types, and that ThisOp is the result of
+/// MVT::getIntVectorWithNumElements with the number of elements
+/// that ThisOp has.
 class SDTCisIntVectorOfSameSize<int ThisOp, int OtherOp> : SDTypeConstraint<ThisOp> {
   int OtherOpNum = OtherOp;
 }
 
+/// SDTCisEltOfVec - This indicates that ThisOp is a scalar type of the same
+/// type as the element type of OtherOp, which is a vector type.
+class SDTCisEltOfVec<int ThisOp, int OtherOp>
+  : SDTypeConstraint<ThisOp> {
+  int OtherOpNum = OtherOp;
+}
+
 //===----------------------------------------------------------------------===//
 // Selection DAG Type Profile definitions.
 //
@@ -150,7 +157,11 @@ def SDTBrcond : SDTypeProfile<0, 2, [ // brcond
   SDTCisInt<0>, SDTCisVT<1, OtherVT>
 ]>;
 
-def SDTRet : SDTypeProfile<0, 0, []>; // ret
+def SDTBrind : SDTypeProfile<0, 1, [ // brind
+  SDTCisPtrTy<0>
+]>;
+
+def SDTNone : SDTypeProfile<0, 0, []>; // ret, trap
 
 def SDTLoad : SDTypeProfile<1, 1, [ // load
   SDTCisPtrTy<1>
@@ -160,19 +171,39 @@ def SDTStore : SDTypeProfile<0, 2, [ // store
   SDTCisPtrTy<1>
 ]>;
 
-def SDTExtLoad : SDTypeProfile<1, 3, [ // extload
-  SDTCisPtrTy<1>, SDTCisVT<2, OtherVT>, SDTCisVT<3, OtherVT>
-]>;
-def SDTIntExtLoad : SDTypeProfile<1, 3, [ // sextload, zextload
-  SDTCisInt<0>, SDTCisPtrTy<1>, SDTCisVT<2, OtherVT>, SDTCisVT<3, OtherVT>
-]>;
-def SDTTruncStore : SDTypeProfile<0, 4, [ // truncstore
-  SDTCisPtrTy<1>, SDTCisVT<2, OtherVT>, SDTCisVT<3, OtherVT>
+def SDTIStore : SDTypeProfile<1, 3, [ // indexed store
+  SDTCisSameAs<0, 2>, SDTCisPtrTy<0>, SDTCisPtrTy<3>
 ]>;
 
 def SDTVecShuffle : SDTypeProfile<1, 3, [
   SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>, SDTCisIntVectorOfSameSize<3, 0>
 ]>;
+def SDTVecExtract : SDTypeProfile<1, 2, [ // vector extract
+  SDTCisEltOfVec<0, 1>, SDTCisPtrTy<2>
+]>;
+def SDTVecInsert : SDTypeProfile<1, 3, [ // vector insert
+  SDTCisEltOfVec<2, 1>, SDTCisSameAs<0, 1>, SDTCisPtrTy<3>
+]>;
+
+def STDPrefetch : SDTypeProfile<0, 3, [ // prefetch
+  SDTCisPtrTy<0>, SDTCisSameAs<1, 2>, SDTCisInt<1>
+]>;
+
+def STDMemBarrier : SDTypeProfile<0, 5, [ // memory barrier
+  SDTCisSameAs<0,1>, SDTCisSameAs<0,2>, SDTCisSameAs<0,3>, SDTCisSameAs<0,4>,
+  SDTCisInt<0>
+]>;
+def STDAtomic3 : SDTypeProfile<1, 3, [
+  SDTCisSameAs<0,2>, SDTCisSameAs<0,3>, SDTCisInt<0>, SDTCisPtrTy<1>
+]>;
+def STDAtomic2 : SDTypeProfile<1, 2, [
+  SDTCisSameAs<0,2>, SDTCisInt<0>, SDTCisPtrTy<1>
+]>;
+
+class SDCallSeqStart<list<SDTypeConstraint> constraints> :
+        SDTypeProfile<0, 1, constraints>;
+class SDCallSeqEnd<list<SDTypeConstraint> constraints> :
+        SDTypeProfile<0, 2, constraints>;
 
 //===----------------------------------------------------------------------===//
 // Selection DAG Node Properties.
 //
@@ -186,6 +217,9 @@ def SDNPHasChain : SDNodeProperty; // R/W chain operand and result
 def SDNPOutFlag : SDNodeProperty; // Write a flag result
 def SDNPInFlag : SDNodeProperty; // Read a flag operand
 def SDNPOptInFlag : SDNodeProperty; // Optionally read a flag operand
+def SDNPMayStore : SDNodeProperty; // May write to memory, sets 'mayStore'.
+def SDNPMayLoad : SDNodeProperty; // May read memory, sets 'mayLoad'.
+def SDNPSideEffect : SDNodeProperty; // Sets 'HasUnmodelledSideEffects'.
 
 //===----------------------------------------------------------------------===//
 // Selection DAG Node definitions.
 //
@@ -199,12 +233,13 @@ class SDNode<string opcode, SDTypeProfile typeprof,
-def fpimm : SDNode<"ISD::TargetConstantFP",
-                   SDTFPLeaf, [], "ConstantFPSDNode">;
+def fpimm : SDNode<"ISD::ConstantFP", SDTFPLeaf , [], "ConstantFPSDNode">;
 def vt : SDNode<"ISD::VALUETYPE" , SDTOther , [], "VTSDNode">;
 def bb : SDNode<"ISD::BasicBlock", SDTOther , [], "BasicBlockSDNode">;
 def cond : SDNode<"ISD::CONDCODE" , SDTOther , [], "CondCodeSDNode">;
@@ -213,10 +248,18 @@ def globaladdr : SDNode<"ISD::GlobalAddress", SDTPtrLeaf, [],
                         "GlobalAddressSDNode">;
 def tglobaladdr : SDNode<"ISD::TargetGlobalAddress", SDTPtrLeaf, [],
                          "GlobalAddressSDNode">;
+def globaltlsaddr : SDNode<"ISD::GlobalTLSAddress", SDTPtrLeaf, [],
+                           "GlobalAddressSDNode">;
+def tglobaltlsaddr : SDNode<"ISD::TargetGlobalTLSAddress", SDTPtrLeaf, [],
+                            "GlobalAddressSDNode">;
 def constpool : SDNode<"ISD::ConstantPool", SDTPtrLeaf, [],
                        "ConstantPoolSDNode">;
 def tconstpool : SDNode<"ISD::TargetConstantPool", SDTPtrLeaf, [],
                         "ConstantPoolSDNode">;
+def jumptable : SDNode<"ISD::JumpTable", SDTPtrLeaf, [],
+                       "JumpTableSDNode">;
+def tjumptable : SDNode<"ISD::TargetJumpTable", SDTPtrLeaf, [],
+                        "JumpTableSDNode">;
 def frameindex : SDNode<"ISD::FrameIndex", SDTPtrLeaf, [],
                         "FrameIndexSDNode">;
 def tframeindex : SDNode<"ISD::TargetFrameIndex", SDTPtrLeaf, [],
@@ -267,6 +310,9 @@ def zext : SDNode<"ISD::ZERO_EXTEND", SDTIntExtendOp>;
 def anyext : SDNode<"ISD::ANY_EXTEND" , SDTIntExtendOp>;
 def trunc : SDNode<"ISD::TRUNCATE" , SDTIntTruncOp>;
 def bitconvert : SDNode<"ISD::BIT_CONVERT", SDTUnaryOp>;
+def extractelt : SDNode<"ISD::EXTRACT_VECTOR_ELT", SDTVecExtract>;
+def insertelt : SDNode<"ISD::INSERT_VECTOR_ELT", SDTVecInsert>;
+
 
 def fadd : SDNode<"ISD::FADD" , SDTFPBinOp, [SDNPCommutative]>;
 def fsub : SDNode<"ISD::FSUB" , SDTFPBinOp>;
@@ -291,35 +337,79 @@ def fp_to_uint : SDNode<"ISD::FP_TO_UINT" , SDTFPToIntOp>;
 def setcc : SDNode<"ISD::SETCC" , SDTSetCC>;
 def select : SDNode<"ISD::SELECT" , SDTSelect>;
 def selectcc : SDNode<"ISD::SELECT_CC" , SDTSelectCC>;
+def vsetcc : SDNode<"ISD::VSETCC" , SDTSetCC>;
 
 def brcond : SDNode<"ISD::BRCOND" , SDTBrcond, [SDNPHasChain]>;
+def brind : SDNode<"ISD::BRIND" , SDTBrind, [SDNPHasChain]>;
 def br : SDNode<"ISD::BR" , SDTBr, [SDNPHasChain]>;
-def ret : SDNode<"ISD::RET" , SDTRet, [SDNPHasChain]>;
-
-def load : SDNode<"ISD::LOAD" , SDTLoad, [SDNPHasChain]>;
-def store : SDNode<"ISD::STORE" , SDTStore, [SDNPHasChain]>;
-
-// Do not use sextld and zextld directly. Use sextload and zextload (see
-// below) which pass in a dummy srcvalue node which tblgen will skip over.
-def sextld : SDNode<"ISD::SEXTLOAD" , SDTIntExtLoad, [SDNPHasChain]>;
-def zextld : SDNode<"ISD::ZEXTLOAD" , SDTIntExtLoad, [SDNPHasChain]>;
-def extld : SDNode<"ISD::EXTLOAD" , SDTExtLoad, [SDNPHasChain]>;
-def truncst : SDNode<"ISD::TRUNCSTORE" , SDTTruncStore, [SDNPHasChain]>;
+def ret : SDNode<"ISD::RET" , SDTNone, [SDNPHasChain]>;
+def trap : SDNode<"ISD::TRAP" , SDTNone,
+                  [SDNPHasChain, SDNPSideEffect]>;
+
+def prefetch : SDNode<"ISD::PREFETCH" , STDPrefetch,
+                      [SDNPHasChain, SDNPMayLoad, SDNPMayStore]>;
+
+def membarrier : SDNode<"ISD::MEMBARRIER" , STDMemBarrier,
+                        [SDNPHasChain, SDNPSideEffect]>;
+
+// Do not use atomic_* directly; use atomic_*_size (see below).
+def atomic_lcs : SDNode<"ISD::ATOMIC_LCS" , STDAtomic3,
+                        [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
+def atomic_las : SDNode<"ISD::ATOMIC_LAS" , STDAtomic2,
+                        [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
+def atomic_swap : SDNode<"ISD::ATOMIC_SWAP", STDAtomic2,
+                         [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
+def atomic_lss : SDNode<"ISD::ATOMIC_LSS" , STDAtomic2,
+                        [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
+def atomic_load_and : SDNode<"ISD::ATOMIC_LOAD_AND" , STDAtomic2,
+                             [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
+def atomic_load_or : SDNode<"ISD::ATOMIC_LOAD_OR" , STDAtomic2,
+                            [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
+def atomic_load_xor : SDNode<"ISD::ATOMIC_LOAD_XOR" , STDAtomic2,
+                             [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
+def atomic_load_nand: SDNode<"ISD::ATOMIC_LOAD_NAND", STDAtomic2,
+                             [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
+def atomic_load_min : SDNode<"ISD::ATOMIC_LOAD_MIN", STDAtomic2,
+                             [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
+def atomic_load_max : SDNode<"ISD::ATOMIC_LOAD_MAX", STDAtomic2,
+                             [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
+def atomic_load_umin : SDNode<"ISD::ATOMIC_LOAD_UMIN", STDAtomic2,
+                              [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
+def atomic_load_umax : SDNode<"ISD::ATOMIC_LOAD_UMAX", STDAtomic2,
+                              [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
+
+// Do not use ld, st directly. Use load, extload, sextload, zextload, store,
+// and the truncstore fragments (see below).
+def ld : SDNode<"ISD::LOAD" , SDTLoad,
+                [SDNPHasChain, SDNPMayLoad]>;
+def st : SDNode<"ISD::STORE" , SDTStore,
+                [SDNPHasChain, SDNPMayStore]>;
+def ist : SDNode<"ISD::STORE" , SDTIStore,
+                 [SDNPHasChain, SDNPMayStore]>;
 
 def vector_shuffle : SDNode<"ISD::VECTOR_SHUFFLE", SDTVecShuffle, []>;
 def build_vector : SDNode<"ISD::BUILD_VECTOR", SDTypeProfile<1, 0, []>, []>;
 def scalar_to_vector : SDNode<"ISD::SCALAR_TO_VECTOR", SDTypeProfile<1, 1, []>,
                               []>;
+def vector_extract : SDNode<"ISD::EXTRACT_VECTOR_ELT",
+    SDTypeProfile<1, 2, [SDTCisPtrTy<2>]>, []>;
+def vector_insert : SDNode<"ISD::INSERT_VECTOR_ELT",
+    SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisPtrTy<3>]>, []>;
+
+def extract_subreg : SDNode<"ISD::EXTRACT_SUBREG",
+                            SDTypeProfile<1, 2, []>>;
+def insert_subreg : SDNode<"ISD::INSERT_SUBREG",
+                           SDTypeProfile<1, 3, []>>;
 
 // Nodes for intrinsics, you should use the intrinsic itself and let tblgen use
 // these internally. Don't reference these directly.
-def intrinsic_void : SDNode<"ISD::INTRINSIC",
+def intrinsic_void : SDNode<"ISD::INTRINSIC_VOID",
                             SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>,
                             [SDNPHasChain]>;
-def intrinsic_w_chain : SDNode<"ISD::INTRINSIC",
+def intrinsic_w_chain : SDNode<"ISD::INTRINSIC_W_CHAIN",
                                SDTypeProfile<1, -1, [SDTCisPtrTy<1>]>,
                                [SDNPHasChain]>;
-def intrinsic_wo_chain : SDNode<"ISD::INTRINSIC",
+def intrinsic_wo_chain : SDNode<"ISD::INTRINSIC_WO_CHAIN",
                                 SDTypeProfile<1, -1, [SDTCisPtrTy<1>]>,
                                 []>;
 
@@ -379,32 +469,377 @@ class PatLeaf<dag frag, code pred = [{}], SDNodeXForm xform = NOOP_SDNodeXForm>
 
 // Leaf fragments.
 
-def vtInt : PatLeaf<(vt), [{ return MVT::isInteger(N->getVT()); }]>;
-def vtFP : PatLeaf<(vt), [{ return MVT::isFloatingPoint(N->getVT()); }]>;
+def vtInt : PatLeaf<(vt), [{ return N->getVT().isInteger(); }]>;
+def vtFP : PatLeaf<(vt), [{ return N->getVT().isFloatingPoint(); }]>;
 
 def immAllOnes : PatLeaf<(imm), [{ return N->isAllOnesValue(); }]>;
 def immAllOnesV: PatLeaf<(build_vector), [{
   return ISD::isBuildVectorAllOnes(N);
 }]>;
+def immAllOnesV_bc: PatLeaf<(bitconvert), [{
+  return ISD::isBuildVectorAllOnes(N);
+}]>;
 def immAllZerosV: PatLeaf<(build_vector), [{
   return ISD::isBuildVectorAllZeros(N);
 }]>;
+def immAllZerosV_bc: PatLeaf<(bitconvert), [{
+  return ISD::isBuildVectorAllZeros(N);
+}]>;
+
 
 // Other helper fragments.
 def not : PatFrag<(ops node:$in), (xor node:$in, immAllOnes)>;
 def vnot : PatFrag<(ops node:$in), (xor node:$in, immAllOnesV)>;
+def vnot_conv : PatFrag<(ops node:$in), (xor node:$in, immAllOnesV_bc)>;
 def ineg : PatFrag<(ops node:$in), (sub 0, node:$in)>;
 
-// extending load & truncstore fragments.
-def sextload : PatFrag<(ops node:$ptr, node:$vt),
-                       (sextld node:$ptr, srcvalue:$dummy, node:$vt)>;
-def zextload : PatFrag<(ops node:$ptr, node:$vt),
-                       (zextld node:$ptr, srcvalue:$dummy, node:$vt)>;
-def extload : PatFrag<(ops node:$ptr, node:$vt),
-                      (extld node:$ptr, srcvalue:$dummy, node:$vt)>;
-def truncstore : PatFrag<(ops node:$val, node:$ptr, node:$vt),
-                         (truncst node:$val, node:$ptr, srcvalue:$dummy,
-                          node:$vt)>;
+// load fragments.
+def load : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
+  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
+    return LD->getExtensionType() == ISD::NON_EXTLOAD &&
+           LD->getAddressingMode() == ISD::UNINDEXED;
+  return false;
+}]>;
+
+// extending load fragments.
+def extloadi1 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
+  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
+    return LD->getExtensionType() == ISD::EXTLOAD &&
+           LD->getAddressingMode() == ISD::UNINDEXED &&
+           LD->getMemoryVT() == MVT::i1;
+  return false;
+}]>;
+def extloadi8 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
+  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
+    return LD->getExtensionType() == ISD::EXTLOAD &&
+           LD->getAddressingMode() == ISD::UNINDEXED &&
+           LD->getMemoryVT() == MVT::i8;
+  return false;
+}]>;
+def extloadi16 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
+  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
+    return LD->getExtensionType() == ISD::EXTLOAD &&
+           LD->getAddressingMode() == ISD::UNINDEXED &&
+           LD->getMemoryVT() == MVT::i16;
+  return false;
+}]>;
+def extloadi32 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
+  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
+    return LD->getExtensionType() == ISD::EXTLOAD &&
+           LD->getAddressingMode() == ISD::UNINDEXED &&
+           LD->getMemoryVT() == MVT::i32;
+  return false;
+}]>;
+def extloadf32 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
+  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
+    return LD->getExtensionType() == ISD::EXTLOAD &&
+           LD->getAddressingMode() == ISD::UNINDEXED &&
+           LD->getMemoryVT() == MVT::f32;
+  return false;
+}]>;
+def extloadf64 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
+  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
+    return LD->getExtensionType() == ISD::EXTLOAD &&
+           LD->getAddressingMode() == ISD::UNINDEXED &&
+           LD->getMemoryVT() == MVT::f64;
+  return false;
+}]>;
+
+def sextloadi1 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
+  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
+    return LD->getExtensionType() == ISD::SEXTLOAD &&
+           LD->getAddressingMode() == ISD::UNINDEXED &&
+           LD->getMemoryVT() == MVT::i1;
+  return false;
+}]>;
+def sextloadi8 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
+  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
+    return LD->getExtensionType() == ISD::SEXTLOAD &&
+           LD->getAddressingMode() == ISD::UNINDEXED &&
+           LD->getMemoryVT() == MVT::i8;
+  return false;
+}]>;
+def sextloadi16 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
+  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
+    return LD->getExtensionType() == ISD::SEXTLOAD &&
+           LD->getAddressingMode() == ISD::UNINDEXED &&
+           LD->getMemoryVT() == MVT::i16;
+  return false;
+}]>;
+def sextloadi32 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
+  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
+    return LD->getExtensionType() == ISD::SEXTLOAD &&
+           LD->getAddressingMode() == ISD::UNINDEXED &&
+           LD->getMemoryVT() == MVT::i32;
+  return false;
+}]>;
+
+def zextloadi1 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
+  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
+    return LD->getExtensionType() == ISD::ZEXTLOAD &&
+           LD->getAddressingMode() == ISD::UNINDEXED &&
+           LD->getMemoryVT() == MVT::i1;
+  return false;
+}]>;
+def zextloadi8 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
+  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
+    return LD->getExtensionType() == ISD::ZEXTLOAD &&
+           LD->getAddressingMode() == ISD::UNINDEXED &&
+           LD->getMemoryVT() == MVT::i8;
+  return false;
+}]>;
+def zextloadi16 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
+  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
+    return LD->getExtensionType() == ISD::ZEXTLOAD &&
+           LD->getAddressingMode() == ISD::UNINDEXED &&
+           LD->getMemoryVT() == MVT::i16;
+  return false;
+}]>;
+def zextloadi32 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
+  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
+    return LD->getExtensionType() == ISD::ZEXTLOAD &&
+           LD->getAddressingMode() == ISD::UNINDEXED &&
+           LD->getMemoryVT() == MVT::i32;
+  return false;
+}]>;
+
+// store fragments.
+def store : PatFrag<(ops node:$val, node:$ptr),
+                    (st node:$val, node:$ptr), [{
+  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
+    return !ST->isTruncatingStore() &&
+           ST->getAddressingMode() == ISD::UNINDEXED;
+  return false;
+}]>;
+
+// truncstore fragments.
+def truncstorei8 : PatFrag<(ops node:$val, node:$ptr),
+                           (st node:$val, node:$ptr), [{
+  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
+    return ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i8 &&
+           ST->getAddressingMode() == ISD::UNINDEXED;
+  return false;
+}]>;
+def truncstorei16 : PatFrag<(ops node:$val, node:$ptr),
+                            (st node:$val, node:$ptr), [{
+  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
+    return ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i16 &&
+           ST->getAddressingMode() == ISD::UNINDEXED;
+  return false;
+}]>;
+def truncstorei32 : PatFrag<(ops node:$val, node:$ptr),
+                            (st node:$val, node:$ptr), [{
+  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
+    return ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i32 &&
+           ST->getAddressingMode() == ISD::UNINDEXED;
+  return false;
+}]>;
+def truncstoref32 : PatFrag<(ops node:$val, node:$ptr),
+                            (st node:$val, node:$ptr), [{
+  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
+    return ST->isTruncatingStore() && ST->getMemoryVT() == MVT::f32 &&
+           ST->getAddressingMode() == ISD::UNINDEXED;
+  return false;
+}]>;
+def truncstoref64 : PatFrag<(ops node:$val, node:$ptr),
+                            (st node:$val, node:$ptr), [{
+  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
+    return ST->isTruncatingStore() && ST->getMemoryVT() == MVT::f64 &&
+           ST->getAddressingMode() == ISD::UNINDEXED;
+  return false;
+}]>;
+
+// indexed store fragments.
+def pre_store : PatFrag<(ops node:$val, node:$base, node:$offset),
+                        (ist node:$val, node:$base, node:$offset), [{
+  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
+    ISD::MemIndexedMode AM = ST->getAddressingMode();
+    return (AM == ISD::PRE_INC || AM == ISD::PRE_DEC) &&
+           !ST->isTruncatingStore();
+  }
+  return false;
+}]>;
+
+def pre_truncsti1 : PatFrag<(ops node:$val, node:$base, node:$offset),
+                            (ist node:$val, node:$base, node:$offset), [{
+  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
+    ISD::MemIndexedMode AM = ST->getAddressingMode();
+    return (AM == ISD::PRE_INC || AM == ISD::PRE_DEC) &&
+           ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i1;
+  }
+  return false;
+}]>;
+def pre_truncsti8 : PatFrag<(ops node:$val, node:$base, node:$offset),
+                            (ist node:$val, node:$base, node:$offset), [{
+  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
+    ISD::MemIndexedMode AM = ST->getAddressingMode();
+    return (AM == ISD::PRE_INC || AM == ISD::PRE_DEC) &&
+           ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i8;
+  }
+  return false;
+}]>;
+def pre_truncsti16 : PatFrag<(ops node:$val, node:$base, node:$offset),
+                             (ist node:$val, node:$base, node:$offset), [{
+  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
+    ISD::MemIndexedMode AM = ST->getAddressingMode();
+    return (AM == ISD::PRE_INC || AM == ISD::PRE_DEC) &&
+           ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i16;
+  }
+  return false;
+}]>;
+def pre_truncsti32 : PatFrag<(ops node:$val, node:$base, node:$offset),
+                             (ist node:$val, node:$base, node:$offset), [{
+  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
+    ISD::MemIndexedMode AM = ST->getAddressingMode();
+    return (AM == ISD::PRE_INC || AM == ISD::PRE_DEC) &&
+           ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i32;
+  }
+  return false;
+}]>;
+def pre_truncstf32 : PatFrag<(ops node:$val, node:$base, node:$offset),
+                             (ist node:$val, node:$base, node:$offset), [{
+  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
+    ISD::MemIndexedMode AM = ST->getAddressingMode();
+    return (AM == ISD::PRE_INC || AM == ISD::PRE_DEC) &&
+           ST->isTruncatingStore() && ST->getMemoryVT() == MVT::f32;
+  }
+  return false;
+}]>;
+
+def post_store : PatFrag<(ops node:$val, node:$ptr, node:$offset),
+                         (ist node:$val, node:$ptr, node:$offset), [{
+  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
+    ISD::MemIndexedMode AM = ST->getAddressingMode();
+    return !ST->isTruncatingStore() &&
+           (AM == ISD::POST_INC || AM == ISD::POST_DEC);
+  }
+  return false;
+}]>;
+
+def post_truncsti1 : PatFrag<(ops node:$val, node:$base, node:$offset),
+                             (ist node:$val, node:$base, node:$offset), [{
+  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
+    ISD::MemIndexedMode AM = ST->getAddressingMode();
+    return (AM == ISD::POST_INC || AM == ISD::POST_DEC) &&
+           ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i1;
+  }
+  return false;
+}]>;
+def post_truncsti8 : PatFrag<(ops node:$val, node:$base, node:$offset),
+                             (ist node:$val, node:$base, node:$offset), [{
+  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
+    ISD::MemIndexedMode AM = ST->getAddressingMode();
+    return (AM == ISD::POST_INC || AM == ISD::POST_DEC) &&
+           ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i8;
+  }
+  return false;
+}]>;
+def post_truncsti16 : PatFrag<(ops node:$val, node:$base, node:$offset),
+                              (ist node:$val, node:$base, node:$offset), [{
+  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
+    ISD::MemIndexedMode AM = ST->getAddressingMode();
+    return (AM == ISD::POST_INC || AM == ISD::POST_DEC) &&
+           ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i16;
+  }
+  return false;
+}]>;
+def post_truncsti32 : PatFrag<(ops node:$val, node:$base, node:$offset),
+                              (ist node:$val, node:$base, node:$offset), [{
+  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
+    ISD::MemIndexedMode AM = ST->getAddressingMode();
+    return (AM == ISD::POST_INC || AM == ISD::POST_DEC) &&
+           ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i32;
+  }
+  return false;
+}]>;
+def post_truncstf32 : PatFrag<(ops node:$val, node:$base, node:$offset),
+                              (ist node:$val, node:$base, node:$offset), [{
+  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
+    ISD::MemIndexedMode AM = ST->getAddressingMode();
+    return (AM == ISD::POST_INC || AM == ISD::POST_DEC) &&
+           ST->isTruncatingStore() && ST->getMemoryVT() == MVT::f32;
+  }
+  return false;
+}]>;
+
+// Atomic patterns.
+def atomic_lcs_8 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp),
+                   (atomic_lcs node:$ptr, node:$cmp, node:$swp), [{
+  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
+    return V->getVT() == MVT::i8;
+  return false;
+}]>;
+def atomic_lcs_16 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp),
+                    (atomic_lcs node:$ptr, node:$cmp, node:$swp), [{
+  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
+    return V->getVT() == MVT::i16;
+  return false;
+}]>;
+def atomic_lcs_32 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp),
+                    (atomic_lcs node:$ptr, node:$cmp, node:$swp), [{
+  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
+    return V->getVT() == MVT::i32;
+  return false;
+}]>;
+def atomic_lcs_64 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp),
+                    (atomic_lcs node:$ptr, node:$cmp, node:$swp), [{
+  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
+    return V->getVT() == MVT::i64;
+  return false;
+}]>;
+
+def atomic_las_8 : PatFrag<(ops node:$ptr, node:$inc),
+                   (atomic_las node:$ptr, node:$inc), [{
+  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
+    return V->getVT() == MVT::i8;
+  return false;
+}]>;
+def atomic_las_16 : PatFrag<(ops node:$ptr, node:$inc),
+                    (atomic_las node:$ptr, node:$inc), [{
+  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
+    return V->getVT() == MVT::i16;
+  return false;
+}]>;
+def atomic_las_32 : PatFrag<(ops node:$ptr, node:$inc),
+                    (atomic_las node:$ptr, node:$inc), [{
+  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
+    return V->getVT() == MVT::i32;
+  return false;
+}]>;
+def atomic_las_64 : PatFrag<(ops node:$ptr, node:$inc),
+                    (atomic_las node:$ptr, node:$inc), [{
+  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
+    return V->getVT() == MVT::i64;
+  return false;
+}]>;
+
+def atomic_swap_8 : PatFrag<(ops node:$ptr, node:$inc),
+                    (atomic_swap node:$ptr, node:$inc), [{
+  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
+    return V->getVT() == MVT::i8;
+  return false;
+}]>;
+def atomic_swap_16 : PatFrag<(ops node:$ptr, node:$inc),
+                     (atomic_swap node:$ptr, node:$inc), [{
+  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
+    return V->getVT() == MVT::i16;
+  return false;
+}]>;
+def atomic_swap_32 : PatFrag<(ops node:$ptr, node:$inc),
+                     (atomic_swap node:$ptr, node:$inc), [{
+  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
+    return V->getVT() == MVT::i32;
+  return false;
+}]>;
+def atomic_swap_64 : PatFrag<(ops node:$ptr, node:$inc),
+                     (atomic_swap node:$ptr, node:$inc), [{
+  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
+    return V->getVT() == MVT::i64;
+  return false;
+}]>;
+
+
 // setcc convenience fragments.
 def setoeq : PatFrag<(ops node:$lhs, node:$rhs),
@@ -460,9 +895,10 @@ def setne : PatFrag<(ops node:$lhs, node:$rhs),
 //
 class Pattern<dag patternToMatch, list<dag> resultInstrs> {
-  dag PatternToMatch = patternToMatch;
-  list<dag> ResultInstrs = resultInstrs;
-  list<Predicate> Predicates = []; // See class Instruction in Target.td.
+  dag             PatternToMatch  = patternToMatch;
+  list<dag>       ResultInstrs    = resultInstrs;
+  list<Predicate> Predicates      = [];  // See class Instruction in Target.td.
+  int             AddedComplexity = 0;   // See class Instruction in Target.td.
 }
 
 // Pat - A simple (but common) form of a pattern, which produces a simple result
@@ -472,17 +908,27 @@ class Pat<dag pattern, dag result> : Pattern<pattern, [result]>;
 //===----------------------------------------------------------------------===//
 // Complex pattern definitions.
 //
+
+class CPAttribute;
+// Pass the parent Operand as root to the CP function rather
+// than the root of the sub-DAG.
+def CPAttrParentAsRoot : CPAttribute;
+
 // Complex patterns, e.g. X86 addressing mode, requires pattern matching code
 // in C++. NumOperands is the number of operands returned by the select function;
 // SelectFunc is the name of the function used to pattern match the max. pattern;
 // RootNodes are the list of possible root nodes of the sub-dags to match.
 // e.g. X86 addressing mode - def addr : ComplexPattern<4, "SelectAddr", [add]>;
 //
-class ComplexPattern<ValueType ty, int numops, string fn, list<SDNode> roots = []> {
+class ComplexPattern<ValueType ty, int numops, string fn, list<SDNode> roots = [], list<SDNodeProperty> props = [],
+                     list<CPAttribute> attrs = []> {
   ValueType Ty = ty;
   int NumOperands = numops;
   string SelectFunc = fn;
   list<SDNode> RootNodes = roots;
+  list<SDNodeProperty> Properties = props;
+  list<CPAttribute> Attributes = attrs;
 }
 
 //===----------------------------------------------------------------------===//
@@ -491,9 +937,3 @@ class ComplexPattern<ValueType ty, int numops, string fn, list<SDNode> roots = [
 def SDT_dwarf_loc : SDTypeProfile<0, 3,
                     [SDTCisInt<0>, SDTCisInt<1>, SDTCisInt<2>]>;
 def dwarf_loc : SDNode<"ISD::DEBUG_LOC", SDT_dwarf_loc,[SDNPHasChain]>;
-
-def SDT_dwarf_label : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
-def dwarf_label : SDNode<"ISD::DEBUG_LABEL", SDT_dwarf_label,[SDNPHasChain]>;
-
-
-
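To see how a backend consumes what this patch introduces, here is a minimal usage sketch in TableGen. It is illustrative only: the instruction LD8ZX32 stands for a target instruction assumed to be defined elsewhere, and the "addr" pattern is a made-up addressing mode; neither is part of this patch or of any real target.

  // Illustration only -- hypothetical target code, not part of the patch.
  // 'addr' defers address matching to the C++ routine named by SelectFunc,
  // using the extended ComplexPattern class from this patch.
  def addr : ComplexPattern<i32, 2, "SelectAddr", [add], []>;

  // Match an unindexed, zero-extending i8 load and emit the (made-up)
  // instruction. The zextloadi8 fragment's predicate already checks the
  // extension type, memory type, and addressing mode.
  def : Pat<(i32 (zextloadi8 addr:$src)),
            (LD8ZX32 addr:$src)>;

Because those checks live in the fragments, target patterns stay one-liners; pre/post-indexed stores are matched separately through fragments such as pre_store and post_store, and the atomic_*_size fragments play the same role for the new atomic nodes.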