//
// The LLVM Compiler Infrastructure
//
-// This file was developed by Chris Lattner and is distributed under
-// the University of Illinois Open Source License. See LICENSE.TXT for details.
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
#define X86ISELLOWERING_H
#include "X86Subtarget.h"
+#include "X86RegisterInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/CodeGen/SelectionDAG.h"
+#include "llvm/CodeGen/CallingConvLower.h"
namespace llvm {
namespace X86ISD {
// Start the numbering where the builtin ops leave off.
FIRST_NUMBER = ISD::BUILTIN_OP_END+X86::INSTRUCTION_LIST_END,
+ /// BSF - Bit scan forward.
+ /// BSR - Bit scan reverse.
+ BSF,
+ BSR,
+
/// SHLD, SHRD - Double shift instructions. These correspond to
/// X86::SHLDxx and X86::SHRDxx instructions.
SHLD,
/// to X86::ANDPS or X86::ANDPD.
FAND,
+ /// FOR - Bitwise logical OR of floating point values. This corresponds
+ /// to X86::ORPS or X86::ORPD.
+ FOR,
+
/// FXOR - Bitwise logical XOR of floating point values. This corresponds
/// to X86::XORPS or X86::XORPD.
FXOR,
+ /// FSRL - Bitwise logical right shift of floating point values. This
+ /// corresponds to X86::PSRLDQ.
+ FSRL,
+
/// FILD, FILD_FLAG - This instruction implements SINT_TO_FP with the
/// integer source in memory and FP reg result. This corresponds to the
/// X86::FILD*m instructions. It has three inputs (token chain, address,
/// FP_TO_INT*_IN_MEM - This instruction implements FP_TO_SINT with the
/// integer destination in memory and a FP reg source. This corresponds
/// to the X86::FIST*m instructions and the rounding mode change stuff. It
- /// has two inputs (token chain and address) and two outputs (int value and
- /// token chain).
+ /// has two inputs (token chain and address) and two outputs (int value
+ /// and token chain).
FP_TO_INT16_IN_MEM,
FP_TO_INT32_IN_MEM,
FP_TO_INT64_IN_MEM,
/// as.
FST,
- /// FP_SET_RESULT - This corresponds to FpGETRESULT pseudo instrcuction
- /// which copies from ST(0) to the destination. It takes a chain and writes
- /// a RFP result and a chain.
+ /// FP_GET_RESULT - This corresponds to the FpGETRESULT pseudo instruction
+ /// which copies from ST(0) to the destination. It takes a chain and
+ /// writes an RFP result and a chain.
FP_GET_RESULT,
- /// FP_SET_RESULT - This corresponds to FpSETRESULT pseudo instrcuction
- /// which copies the source operand to ST(0). It takes a chain and writes
- /// a chain and a flag.
+ /// FP_SET_RESULT - This corresponds to the FpSETRESULT pseudo instruction
+ /// which copies the source operand to ST(0). It takes a chain+value and
+ /// returns a chain and a flag.
FP_SET_RESULT,
/// CALL/TAILCALL - These operations represent an abstract X86 call
RDTSC_DAG,
/// X86 compare and logical compare instructions.
- CMP, TEST,
+ CMP, COMI, UCOMI,
/// X86 SetCC. Operand 1 is condition code, and operand 2 is the flag
/// operand produced by a CMP instruction.
SETCC,
/// X86 conditional moves. Operand 1 and operand 2 are the two values
- /// to select from (operand 1 is a R/W operand). Operand 3 is the condition
- /// code, and operand 4 is the flag operand produced by a CMP or TEST
- /// instruction. It also writes a flag result.
+ /// to select from (operand 1 is a R/W operand). Operand 3 is the
+ /// condition code, and operand 4 is the flag operand produced by a CMP
+ /// or TEST instruction. It also writes a flag result.
CMOV,
/// X86 conditional branches. Operand 1 is the chain operand, operand 2
/// REP_MOVS - Repeat move, corresponds to X86::REP_MOVSx.
REP_MOVS,
- /// LOAD_PACK Load a 128-bit packed float / double value. It has the same
- /// operands as a normal load.
- LOAD_PACK,
-
/// GlobalBaseReg - On Darwin, this node represents the result of the popl
/// at function entry, used for PIC code.
GlobalBaseReg,
- /// TCPWrapper - A wrapper node for TargetConstantPool,
+ /// Wrapper - A wrapper node for TargetConstantPool,
/// TargetExternalSymbol, and TargetGlobalAddress.
Wrapper,
+ /// WrapperRIP - Special wrapper used under X86-64 PIC mode for RIP
+ /// relative displacements.
+ WrapperRIP,
+
/// S2VEC - X86 version of SCALAR_TO_VECTOR. The destination base does not
/// have to match the operand type.
S2VEC,
- /// ZEXT_S2VEC - SCALAR_TO_VECTOR with zero extension. The destination base
- /// does not have to match the operand type.
- ZEXT_S2VEC,
- };
+ /// PEXTRW - Extract a 16-bit value from a vector and zero extend it to
+ /// i32, corresponds to X86::PEXTRW.
+ PEXTRW,
+
+ /// PINSRW - Insert the lower 16 bits of a 32-bit value into a vector,
+ /// corresponds to X86::PINSRW.
+ PINSRW,
+
+ /// FMAX, FMIN - Floating point max and min.
+ ///
+ FMAX, FMIN,
+
+ /// FRSQRT, FRCP - Floating point reciprocal-sqrt and reciprocal
+ /// approximation. Note that these typically require refinement
+ /// in order to obtain suitable precision.
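+ /// (On x86 the estimate instructions give roughly 12 bits of precision;
+ /// one Newton-Raphson step is the usual refinement.)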
+ FRSQRT, FRCP,
+
+ // Thread Local Storage
+ TLSADDR, THREAD_POINTER,
- // X86 specific condition code. These correspond to X86_*_COND in
- // X86InstrInfo.td. They must be kept in synch.
- enum CondCode {
- COND_A = 0,
- COND_AE = 1,
- COND_B = 2,
- COND_BE = 3,
- COND_E = 4,
- COND_G = 5,
- COND_GE = 6,
- COND_L = 7,
- COND_LE = 8,
- COND_NE = 9,
- COND_NO = 10,
- COND_NP = 11,
- COND_NS = 12,
- COND_O = 13,
- COND_P = 14,
- COND_S = 15,
- COND_INVALID
+ // Exception Handling helpers
+ EH_RETURN,
+
+ // tail call return
+ // operand #0 chain
+ // operand #1 callee (register or absolute)
+ // operand #2 stack adjustment
+ // operand #3 optional in flag
+ TC_RETURN,
+
+ // Store FP control word into i16 memory
+ FNSTCW16m
};
}
/// specifies a shuffle of elements that is suitable for input to PSHUFD.
bool isPSHUFDMask(SDNode *N);
+ /// isPSHUFHWMask - Return true if the specified VECTOR_SHUFFLE operand
+ /// specifies a shuffle of elements that is suitable for input to PSHUFHW.
+ bool isPSHUFHWMask(SDNode *N);
+
+ /// isPSHUFLWMask - Return true if the specified VECTOR_SHUFFLE operand
+ /// specifies a shuffle of elements that is suitable for input to PSHUFLW.
+ bool isPSHUFLWMask(SDNode *N);
+
/// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to SHUFP*.
bool isSHUFPMask(SDNode *N);
/// specifies a shuffle of elements that is suitable for input to MOVHLPS.
bool isMOVHLPSMask(SDNode *N);
+ /// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form
+ /// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef,
+ /// <2, 3, 2, 3>
+ bool isMOVHLPS_v_undef_Mask(SDNode *N);
+
+ /// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand
+ /// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}.
+ bool isMOVLPMask(SDNode *N);
+
+ /// isMOVHPMask - Return true if the specified VECTOR_SHUFFLE operand
+ /// specifies a shuffle of elements that is suitable for input to MOVHP{S|D}
+ /// as well as MOVLHPS.
+ bool isMOVHPMask(SDNode *N);
+
/// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to UNPCKL.
- bool isUNPCKLMask(SDNode *N);
+ bool isUNPCKLMask(SDNode *N, bool V2IsSplat = false);
/// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to UNPCKH.
- bool isUNPCKHMask(SDNode *N);
+ bool isUNPCKHMask(SDNode *N, bool V2IsSplat = false);
+
+ /// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form
+ /// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef,
+ /// <0, 0, 1, 1>
+ bool isUNPCKL_v_undef_Mask(SDNode *N);
+
+ /// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form
+ /// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef,
+ /// <2, 2, 3, 3>
+ bool isUNPCKH_v_undef_Mask(SDNode *N);
+
+ /// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
+ /// specifies a shuffle of elements that is suitable for input to MOVSS,
+ /// MOVSD, and MOVD, i.e. setting the lowest element.
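+ /// For 4-element vectors this is, e.g., the mask <4, 1, 2, 3>: the low
+ /// element comes from the second operand, the rest from the first.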
+ bool isMOVLMask(SDNode *N);
+
+ /// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
+ /// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
+ bool isMOVSHDUPMask(SDNode *N);
+
+ /// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
+ /// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
+ bool isMOVSLDUPMask(SDNode *N);
/// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element.
bool isSplatMask(SDNode *N);
+ /// isSplatLoMask - Return true if the specified VECTOR_SHUFFLE operand
+ /// specifies a splat of element zero.
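+ /// For 4-element vectors this is, e.g., the mask <0, 0, 0, 0>.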
+ bool isSplatLoMask(SDNode *N);
+
/// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUF* and SHUFP*
/// instructions.
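+ /// For a 4-element shuffle the immediate packs one 2-bit source index
+ /// per destination lane, lane 0 in the low bits; e.g. the identity order
+ /// <0, 1, 2, 3> encodes as 0xE4.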
unsigned getShuffleSHUFImmediate(SDNode *N);
+
+ /// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
+ /// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUFHW
+ /// instructions.
+ unsigned getShufflePSHUFHWImmediate(SDNode *N);
+
+ /// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
+ /// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUFLW
+ /// instructions.
+ unsigned getShufflePSHUFLWImmediate(SDNode *N);
}
- //===----------------------------------------------------------------------===//
+ //===--------------------------------------------------------------------===//
// X86TargetLowering - X86 Implementation of the TargetLowering interface
class X86TargetLowering : public TargetLowering {
int VarArgsFrameIndex; // FrameIndex for start of varargs area.
- int ReturnAddrIndex; // FrameIndex for return slot.
+ int RegSaveFrameIndex; // X86-64 vararg func register save area.
+ unsigned VarArgsGPOffset; // X86-64 vararg func int reg offset.
+ unsigned VarArgsFPOffset; // X86-64 vararg func fp reg offset.
int BytesToPopOnReturn; // Number of arg bytes ret should pop.
int BytesCallerReserves; // Number of arg bytes caller makes.
+
public:
- X86TargetLowering(TargetMachine &TM);
+ explicit X86TargetLowering(TargetMachine &TM);
+
+ /// getPICJumpTableRelocBase - Returns relocation base for the given PIC
+ /// jumptable.
+ SDOperand getPICJumpTableRelocBase(SDOperand Table,
+ SelectionDAG &DAG) const;
// Return the number of bytes that a function should pop when it returns (in
// addition to the space used by the return address).
// to this function.
unsigned getBytesCallerReserves() const { return BytesCallerReserves; }
+ /// getStackPtrReg - Return the stack pointer register we are using: either
+ /// ESP or RSP.
+ unsigned getStackPtrReg() const { return X86StackPtr; }
+
/// LowerOperation - Provide custom lowering hooks for some operations.
///
virtual SDOperand LowerOperation(SDOperand Op, SelectionDAG &DAG);
- /// LowerArguments - This hook must be implemented to indicate how we should
- /// lower the arguments for the specified function, into the specified DAG.
- virtual std::vector<SDOperand>
- LowerArguments(Function &F, SelectionDAG &DAG);
-
- /// LowerCallTo - This hook lowers an abstract call to a function into an
- /// actual call.
- virtual std::pair<SDOperand, SDOperand>
- LowerCallTo(SDOperand Chain, const Type *RetTy, bool isVarArg, unsigned CC,
- bool isTailCall, SDOperand Callee, ArgListTy &Args,
- SelectionDAG &DAG);
+ /// ExpandOperationResult - Custom lower the specified operation,
+ /// splitting the value into two pieces.
+ ///
+ virtual SDNode *ExpandOperationResult(SDNode *N, SelectionDAG &DAG);
- virtual std::pair<SDOperand, SDOperand>
- LowerFrameReturnAddress(bool isFrameAddr, SDOperand Chain, unsigned Depth,
- SelectionDAG &DAG);
+
+ virtual SDOperand PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
virtual MachineBasicBlock *InsertAtEndOfBasicBlock(MachineInstr *MI,
MachineBasicBlock *MBB);
uint64_t Mask,
uint64_t &KnownZero,
uint64_t &KnownOne,
+ const SelectionDAG &DAG,
unsigned Depth = 0) const;
SDOperand getReturnAddressFrameIndex(SelectionDAG &DAG);
+ ConstraintType getConstraintType(const std::string &Constraint) const;
+
std::vector<unsigned>
getRegClassForInlineAsmConstraint(const std::string &Constraint,
MVT::ValueType VT) const;
- /// isLegalAddressImmediate - Return true if the integer value or
- /// GlobalValue can be used as the offset of the target addressing mode.
- virtual bool isLegalAddressImmediate(int64_t V) const;
- virtual bool isLegalAddressImmediate(GlobalValue *GV) const;
-
+ /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
+ /// vector. If it is invalid, don't add anything to Ops.
+ virtual void LowerAsmOperandForConstraint(SDOperand Op,
+ char ConstraintLetter,
+ std::vector<SDOperand> &Ops,
+ SelectionDAG &DAG);
+
+ /// getRegForInlineAsmConstraint - Given a physical register constraint
+ /// (e.g. {edx}), return the register number and the register class for the
+ /// register. This should only be used for C_Register constraints. On
+ /// error, this returns a register number of 0.
+ std::pair<unsigned, const TargetRegisterClass*>
+ getRegForInlineAsmConstraint(const std::string &Constraint,
+ MVT::ValueType VT) const;
+
+ /// isLegalAddressingMode - Return true if the addressing mode represented
+ /// by AM is legal for this target, for a load/store of the specified type.
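+ /// (The general x86 form is Base + Scale*Index + Disp, with Scale in
+ /// {1, 2, 4, 8} and a sign-extended 32-bit displacement.)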
+ virtual bool isLegalAddressingMode(const AddrMode &AM, const Type *Ty)const;
+
+ /// isTruncateFree - Return true if it's free to truncate a value of
+ /// type Ty1 to type Ty2. e.g. On x86 it's free to truncate an i32 value in
+ /// register EAX to i16 by referencing its sub-register AX.
+ virtual bool isTruncateFree(const Type *Ty1, const Type *Ty2) const;
+ virtual bool isTruncateFree(MVT::ValueType VT1, MVT::ValueType VT2) const;
+
/// isShuffleMaskLegal - Targets can use this to indicate that they only
/// support *some* VECTOR_SHUFFLE operations, those with specific masks.
- /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
- /// are assumed to be legal.
+ /// By default, if a target supports the VECTOR_SHUFFLE node, all mask
+ /// values are assumed to be legal.
virtual bool isShuffleMaskLegal(SDOperand Mask, MVT::ValueType VT) const;
- private:
- // C Calling Convention implementation.
- std::vector<SDOperand> LowerCCCArguments(Function &F, SelectionDAG &DAG);
- std::pair<SDOperand, SDOperand>
- LowerCCCCallTo(SDOperand Chain, const Type *RetTy, bool isVarArg,
- bool isTailCall,
- SDOperand Callee, ArgListTy &Args, SelectionDAG &DAG);
-
- // Fast Calling Convention implementation.
- std::vector<SDOperand> LowerFastCCArguments(Function &F, SelectionDAG &DAG);
- std::pair<SDOperand, SDOperand>
- LowerFastCCCallTo(SDOperand Chain, const Type *RetTy, bool isTailCall,
- SDOperand Callee, ArgListTy &Args, SelectionDAG &DAG);
+ /// isVectorClearMaskLegal - Similar to isShuffleMaskLegal. Targets can
+ /// use this to indicate whether there is a suitable VECTOR_SHUFFLE that
+ /// can be used to replace a VAND with a constant pool entry.
+ virtual bool isVectorClearMaskLegal(std::vector<SDOperand> &BVOps,
+ MVT::ValueType EVT,
+ SelectionDAG &DAG) const;
+
+ /// IsEligibleForTailCallOptimization - Check whether the call is eligible
+ /// for tail call optimization. Targets that want to do tail call
+ /// optimization should implement this function.
+ virtual bool IsEligibleForTailCallOptimization(SDOperand Call,
+ SDOperand Ret,
+ SelectionDAG &DAG) const;
+
+ virtual const TargetSubtarget* getSubtarget() {
+ return static_cast<const TargetSubtarget*>(Subtarget);
+ }
+
+ private:
/// Subtarget - Keep a pointer to the X86Subtarget around so that we can
/// make the right decision when generating code for different targets.
const X86Subtarget *Subtarget;
-
- /// X86ScalarSSE - Select between SSE2 or x87 floating point ops.
- bool X86ScalarSSE;
+ const MRegisterInfo *RegInfo;
+
+ /// X86StackPtr - X86 physical register used as stack ptr.
+ unsigned X86StackPtr;
+
+ /// X86ScalarSSEf32, X86ScalarSSEf64 - Select between SSE and x87
+ /// floating point ops.
+ /// When SSE is available, use it for f32 operations.
+ /// When SSE2 is available, use it for f64 operations.
+ bool X86ScalarSSEf32;
+ bool X86ScalarSSEf64;
+
+ SDNode *LowerCallResult(SDOperand Chain, SDOperand InFlag, SDNode*TheCall,
+ unsigned CallingConv, SelectionDAG &DAG);
+
+ SDOperand LowerMemArgument(SDOperand Op, SelectionDAG &DAG,
+ const CCValAssign &VA, MachineFrameInfo *MFI,
+ SDOperand Root, unsigned i);
+
+ SDOperand LowerMemOpCallTo(SDOperand Op, SelectionDAG &DAG,
+ const SDOperand &StackPtr,
+ const CCValAssign &VA, SDOperand Chain,
+ SDOperand Arg);
+
+ // C and StdCall Calling Convention implementation.
+ SDOperand LowerCCCArguments(SDOperand Op, SelectionDAG &DAG,
+ bool isStdCall = false);
+ SDOperand LowerCCCCallTo(SDOperand Op, SelectionDAG &DAG, unsigned CC);
+
+ // X86-64 C Calling Convention implementation.
+ SDOperand LowerX86_64CCCArguments(SDOperand Op, SelectionDAG &DAG);
+ SDOperand LowerX86_64CCCCallTo(SDOperand Op, SelectionDAG &DAG,unsigned CC);
+
+ // Fast calling convention (tail call) implementation for 32/64-bit.
+ SDOperand LowerX86_TailCallTo(SDOperand Op,
+ SelectionDAG & DAG, unsigned CC);
+ unsigned GetAlignedArgumentStackSize(unsigned StackSize, SelectionDAG &DAG);
+ // Fast and FastCall Calling Convention implementation.
+ SDOperand LowerFastCCArguments(SDOperand Op, SelectionDAG &DAG);
+ SDOperand LowerFastCCCallTo(SDOperand Op, SelectionDAG &DAG, unsigned CC);
+
+ std::pair<SDOperand,SDOperand> FP_TO_SINTHelper(SDOperand Op,
+ SelectionDAG &DAG);
+
+ SDOperand LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG);
+ SDOperand LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG);
+ SDOperand LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG);
+ SDOperand LowerINSERT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG);
+ SDOperand LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG);
+ SDOperand LowerConstantPool(SDOperand Op, SelectionDAG &DAG);
+ SDOperand LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG);
+ SDOperand LowerGlobalTLSAddress(SDOperand Op, SelectionDAG &DAG);
+ SDOperand LowerExternalSymbol(SDOperand Op, SelectionDAG &DAG);
+ SDOperand LowerShift(SDOperand Op, SelectionDAG &DAG);
+ SDOperand LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG);
+ SDOperand LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG);
+ SDOperand LowerFABS(SDOperand Op, SelectionDAG &DAG);
+ SDOperand LowerFNEG(SDOperand Op, SelectionDAG &DAG);
+ SDOperand LowerFCOPYSIGN(SDOperand Op, SelectionDAG &DAG);
+ SDOperand LowerSETCC(SDOperand Op, SelectionDAG &DAG);
+ SDOperand LowerSELECT(SDOperand Op, SelectionDAG &DAG);
+ SDOperand LowerBRCOND(SDOperand Op, SelectionDAG &DAG);
+ SDOperand LowerMEMSET(SDOperand Op, SelectionDAG &DAG);
+ SDOperand LowerMEMCPYInline(SDOperand Dest, SDOperand Source,
+ SDOperand Chain, unsigned Size, unsigned Align,
+ SelectionDAG &DAG);
+ SDOperand LowerJumpTable(SDOperand Op, SelectionDAG &DAG);
+ SDOperand LowerCALL(SDOperand Op, SelectionDAG &DAG);
+ SDOperand LowerRET(SDOperand Op, SelectionDAG &DAG);
+ SDOperand LowerDYNAMIC_STACKALLOC(SDOperand Op, SelectionDAG &DAG);
+ SDOperand LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG);
+ SDOperand LowerVASTART(SDOperand Op, SelectionDAG &DAG);
+ SDOperand LowerVACOPY(SDOperand Op, SelectionDAG &DAG);
+ SDOperand LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG);
+ SDOperand LowerRETURNADDR(SDOperand Op, SelectionDAG &DAG);
+ SDOperand LowerFRAMEADDR(SDOperand Op, SelectionDAG &DAG);
+ SDOperand LowerFRAME_TO_ARGS_OFFSET(SDOperand Op, SelectionDAG &DAG);
+ SDOperand LowerEH_RETURN(SDOperand Op, SelectionDAG &DAG);
+ SDOperand LowerTRAMPOLINE(SDOperand Op, SelectionDAG &DAG);
+ SDOperand LowerFLT_ROUNDS(SDOperand Op, SelectionDAG &DAG);
+ SDOperand LowerCTLZ(SDOperand Op, SelectionDAG &DAG);
+ SDOperand LowerCTTZ(SDOperand Op, SelectionDAG &DAG);
+ SDNode *ExpandFP_TO_SINT(SDNode *N, SelectionDAG &DAG);
+ SDNode *ExpandREADCYCLECOUNTER(SDNode *N, SelectionDAG &DAG);
};
}