diff --git a/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 195b1504a57..c6fd113ee75 100644
--- a/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -2,8 +2,8 @@
 //
 //                     The LLVM Compiler Infrastructure
 //
-// This file was developed by the LLVM research group and is distributed under
-// the University of Illinois Open Source License. See LICENSE.TXT for details.
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
 //
 //===----------------------------------------------------------------------===//
 //
@@ -12,13 +12,17 @@
 //===----------------------------------------------------------------------===//
 
 #include "llvm/Target/TargetLowering.h"
+#include "llvm/Target/TargetSubtarget.h"
 #include "llvm/Target/TargetData.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Target/MRegisterInfo.h"
 #include "llvm/DerivedTypes.h"
 #include "llvm/CodeGen/SelectionDAG.h"
 #include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/STLExtras.h"
 #include "llvm/Support/MathExtras.h"
+#include "llvm/Target/TargetAsmInfo.h"
+#include "llvm/CallingConv.h"
 using namespace llvm;
 
 /// InitLibcallNames - Set default libcall names.
 static void InitLibcallNames(const char **Names) {
   Names[RTLIB::NEG_I64] = "__negdi2";
   Names[RTLIB::ADD_F32] = "__addsf3";
   Names[RTLIB::ADD_F64] = "__adddf3";
+  Names[RTLIB::ADD_PPCF128] = "__gcc_qadd";
   Names[RTLIB::SUB_F32] = "__subsf3";
   Names[RTLIB::SUB_F64] = "__subdf3";
+  Names[RTLIB::SUB_PPCF128] = "__gcc_qsub";
   Names[RTLIB::MUL_F32] = "__mulsf3";
   Names[RTLIB::MUL_F64] = "__muldf3";
+  Names[RTLIB::MUL_PPCF128] = "__gcc_qmul";
   Names[RTLIB::DIV_F32] = "__divsf3";
   Names[RTLIB::DIV_F64] = "__divdf3";
+  Names[RTLIB::DIV_PPCF128] = "__gcc_qdiv";
   Names[RTLIB::REM_F32] = "fmodf";
   Names[RTLIB::REM_F64] = "fmod";
+  Names[RTLIB::REM_PPCF128] = "fmodl";
   Names[RTLIB::NEG_F32] = "__negsf2";
   Names[RTLIB::NEG_F64] = "__negdf2";
   Names[RTLIB::POWI_F32] = "__powisf2";
   Names[RTLIB::POWI_F64] = "__powidf2";
+  Names[RTLIB::POWI_F80] = "__powixf2";
+  Names[RTLIB::POWI_PPCF128] = "__powitf2";
   Names[RTLIB::SQRT_F32] = "sqrtf";
   Names[RTLIB::SQRT_F64] = "sqrt";
+  Names[RTLIB::SQRT_F80] = "sqrtl";
+  Names[RTLIB::SQRT_PPCF128] = "sqrtl";
   Names[RTLIB::SIN_F32] = "sinf";
   Names[RTLIB::SIN_F64] = "sin";
   Names[RTLIB::COS_F32] = "cosf";
   Names[RTLIB::COS_F64] = "cos";
+  Names[RTLIB::POW_F32] = "powf";
+  Names[RTLIB::POW_F64] = "pow";
+  Names[RTLIB::POW_F80] = "powl";
+  Names[RTLIB::POW_PPCF128] = "powl";
   Names[RTLIB::FPEXT_F32_F64] = "__extendsfdf2";
   Names[RTLIB::FPROUND_F64_F32] = "__truncdfsf2";
   Names[RTLIB::FPTOSINT_F32_I32] = "__fixsfsi";
   Names[RTLIB::FPTOSINT_F32_I64] = "__fixsfdi";
   Names[RTLIB::FPTOSINT_F64_I32] = "__fixdfsi";
   Names[RTLIB::FPTOSINT_F64_I64] = "__fixdfdi";
+  Names[RTLIB::FPTOSINT_F80_I64] = "__fixxfdi";
+  Names[RTLIB::FPTOSINT_PPCF128_I64] = "__fixtfdi";
   Names[RTLIB::FPTOUINT_F32_I32] = "__fixunssfsi";
   Names[RTLIB::FPTOUINT_F32_I64] = "__fixunssfdi";
   Names[RTLIB::FPTOUINT_F64_I32] = "__fixunsdfsi";
   Names[RTLIB::FPTOUINT_F64_I64] = "__fixunsdfdi";
+  Names[RTLIB::FPTOUINT_F80_I32] = "__fixunsxfsi";
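  // (Editorial note, not in the patch: the libcall names follow GCC's type
  //  letters -- s=f32, d=f64, x=f80, t=128-bit float, si=i32, di=i64 -- so
  //  "__fixunsxfdi" below reads "convert f80 to unsigned i64".)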
Names[RTLIB::FPTOUINT_F80_I64] = "__fixunsxfdi"; + Names[RTLIB::FPTOUINT_PPCF128_I64] = "__fixunstfdi"; Names[RTLIB::SINTTOFP_I32_F32] = "__floatsisf"; Names[RTLIB::SINTTOFP_I32_F64] = "__floatsidf"; Names[RTLIB::SINTTOFP_I64_F32] = "__floatdisf"; Names[RTLIB::SINTTOFP_I64_F64] = "__floatdidf"; + Names[RTLIB::SINTTOFP_I64_F80] = "__floatdixf"; + Names[RTLIB::SINTTOFP_I64_PPCF128] = "__floatditf"; Names[RTLIB::UINTTOFP_I32_F32] = "__floatunsisf"; Names[RTLIB::UINTTOFP_I32_F64] = "__floatunsidf"; Names[RTLIB::UINTTOFP_I64_F32] = "__floatundisf"; @@ -128,13 +152,20 @@ TargetLowering::TargetLowering(TargetMachine &tm) memset(OpActions, 0, sizeof(OpActions)); memset(LoadXActions, 0, sizeof(LoadXActions)); memset(&StoreXActions, 0, sizeof(StoreXActions)); - // Initialize all indexed load / store to expand. + memset(&IndexedModeActions, 0, sizeof(IndexedModeActions)); + memset(&ConvertActions, 0, sizeof(ConvertActions)); + + // Set default actions for various operations. for (unsigned VT = 0; VT != (unsigned)MVT::LAST_VALUETYPE; ++VT) { + // Default all indexed load / store to expand. for (unsigned IM = (unsigned)ISD::PRE_INC; IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) { setIndexedLoadAction(IM, (MVT::ValueType)VT, Expand); setIndexedStoreAction(IM, (MVT::ValueType)VT, Expand); } + + // These operations default to expand. + setOperationAction(ISD::FGETSIGN, (MVT::ValueType)VT, Expand); } IsLittleEndian = TD->isLittleEndian(); @@ -142,8 +173,7 @@ TargetLowering::TargetLowering(TargetMachine &tm) ShiftAmountTy = SetCCResultTy = PointerTy = getValueType(TD->getIntPtrType()); ShiftAmtHandling = Undefined; memset(RegClassForVT, 0,MVT::LAST_VALUETYPE*sizeof(TargetRegisterClass*)); - memset(TargetDAGCombineArray, 0, - sizeof(TargetDAGCombineArray)/sizeof(TargetDAGCombineArray[0])); + memset(TargetDAGCombineArray, 0, array_lengthof(TargetDAGCombineArray)); maxStoresPerMemset = maxStoresPerMemcpy = maxStoresPerMemmove = 8; allowUnalignedMemoryAccesses = false; UseUnderscoreSetJmp = false; @@ -154,56 +184,72 @@ TargetLowering::TargetLowering(TargetMachine &tm) StackPointerRegisterToSaveRestore = 0; ExceptionPointerRegister = 0; ExceptionSelectorRegister = 0; + SetCCResultContents = UndefinedSetCCResult; SchedPreferenceInfo = SchedulingForLatency; JumpBufSize = 0; JumpBufAlignment = 0; + IfCvtBlockSizeLimit = 2; InitLibcallNames(LibcallRoutineNames); InitCmpLibcallCCs(CmpLibcallCCs); + + // Tell Legalize whether the assembler supports DEBUG_LOC. + if (!TM.getTargetAsmInfo()->hasDotLocAndDotFile()) + setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand); } TargetLowering::~TargetLowering() {} -/// setValueTypeAction - Set the action for a particular value type. This -/// assumes an action has not already been set for this value type. 
-static void SetValueTypeAction(MVT::ValueType VT,
-                               TargetLowering::LegalizeAction Action,
-                               TargetLowering &TLI,
-                               MVT::ValueType *TransformToType,
-                               TargetLowering::ValueTypeActionImpl &ValueTypeActions) {
-  ValueTypeActions.setTypeAction(VT, Action);
-  if (Action == TargetLowering::Promote) {
-    MVT::ValueType PromoteTo;
-    if (VT == MVT::f32)
-      PromoteTo = MVT::f64;
-    else {
-      unsigned LargerReg = VT+1;
-      while (!TLI.isTypeLegal((MVT::ValueType)LargerReg)) {
-        ++LargerReg;
-        assert(MVT::isInteger((MVT::ValueType)LargerReg) &&
-               "Nothing to promote to??");
-      }
-      PromoteTo = (MVT::ValueType)LargerReg;
-    }
-    assert(MVT::isInteger(VT) == MVT::isInteger(PromoteTo) &&
-           MVT::isFloatingPoint(VT) == MVT::isFloatingPoint(PromoteTo) &&
-           "Can only promote from int->int or fp->fp!");
-    assert(VT < PromoteTo && "Must promote to a larger type!");
-    TransformToType[VT] = PromoteTo;
-  } else if (Action == TargetLowering::Expand) {
-    // f32 and f64 is each expanded to corresponding integer type of same size.
-    if (VT == MVT::f32)
-      TransformToType[VT] = MVT::i32;
-    else if (VT == MVT::f64)
-      TransformToType[VT] = MVT::i64;
-    else {
-      assert((VT == MVT::Vector || MVT::isInteger(VT)) && VT > MVT::i8 &&
-             "Cannot expand this type: target must support SOME integer reg!");
-      // Expand to the next smaller integer type!
-      TransformToType[VT] = (MVT::ValueType)(VT-1);
-    }
+SDOperand TargetLowering::LowerMEMCPY(SDOperand Op, SelectionDAG &DAG) {
+  assert(getSubtarget() && "Subtarget not defined");
+  SDOperand ChainOp = Op.getOperand(0);
+  SDOperand DestOp = Op.getOperand(1);
+  SDOperand SourceOp = Op.getOperand(2);
+  SDOperand CountOp = Op.getOperand(3);
+  SDOperand AlignOp = Op.getOperand(4);
+  SDOperand AlwaysInlineOp = Op.getOperand(5);
+
+  bool AlwaysInline = (bool)cast<ConstantSDNode>(AlwaysInlineOp)->getValue();
+  unsigned Align = (unsigned)cast<ConstantSDNode>(AlignOp)->getValue();
+  if (Align == 0) Align = 1;
+
+  // If size is unknown, call memcpy.
+  ConstantSDNode *I = dyn_cast<ConstantSDNode>(CountOp);
+  if (!I) {
+    assert(!AlwaysInline && "Cannot inline copy of unknown size");
+    return LowerMEMCPYCall(ChainOp, DestOp, SourceOp, CountOp, DAG);
   }
+
+  // If not DWORD aligned or if size is more than threshold, then call memcpy.
+  // The libc version is likely to be faster for the following cases.  It can
+  // use the address value and run time information about the CPU.
+  // With glibc 2.6.1 on a Core 2, copying an array of 100M longs was 30% faster.
+  unsigned Size = I->getValue();
+  if (AlwaysInline ||
+      (Size <= getSubtarget()->getMaxInlineSizeThreshold() &&
+       (Align & 3) == 0))
+    return LowerMEMCPYInline(ChainOp, DestOp, SourceOp, Size, Align, DAG);
+  return LowerMEMCPYCall(ChainOp, DestOp, SourceOp, CountOp, DAG);
+}
+
+
+SDOperand TargetLowering::LowerMEMCPYCall(SDOperand Chain,
+                                          SDOperand Dest,
+                                          SDOperand Source,
+                                          SDOperand Count,
+                                          SelectionDAG &DAG) {
+  MVT::ValueType IntPtr = getPointerTy();
+  TargetLowering::ArgListTy Args;
+  TargetLowering::ArgListEntry Entry;
+  Entry.Ty = getTargetData()->getIntPtrType();
+  Entry.Node = Dest; Args.push_back(Entry);
+  Entry.Node = Source; Args.push_back(Entry);
+  Entry.Node = Count; Args.push_back(Entry);
+  std::pair<SDOperand,SDOperand> CallResult =
+      LowerCallTo(Chain, Type::VoidTy, false, false, CallingConv::C, false,
+                  DAG.getExternalSymbol("memcpy", IntPtr), Args, DAG);
+  return CallResult.second;
 }
 
 
@@ -213,9 +259,13 @@ void TargetLowering::computeRegisterProperties() {
   assert(MVT::LAST_VALUETYPE <= 32 &&
          "Too many value types for ValueTypeActions to hold!");
 
-  // Everything defaults to one.
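// --- Editorial sketch (not part of the patch) -----------------------------
// The inline-vs-libcall policy of LowerMEMCPY above, restated over plain
// integers so it can be tested in isolation.  "Threshold" stands in for
// getSubtarget()->getMaxInlineSizeThreshold(); all names here are
// illustrative, not LLVM API.
static bool shouldExpandMemcpyInline(bool SizeIsConstant, unsigned Size,
                                     unsigned Align, bool AlwaysInline,
                                     unsigned Threshold) {
  if (!SizeIsConstant)
    return false;           // Unknown length: always call libc memcpy.
  if (AlwaysInline)
    return true;            // Caller demanded inline expansion.
  // Expand only small, 4-byte-aligned copies; the libc version is likely
  // faster otherwise, since it can use run-time CPU information.
  return Size <= Threshold && (Align & 3) == 0;
}
// e.g. shouldExpandMemcpyInline(true, 16, 4, false, 64) -> true (inlined),
//      shouldExpandMemcpyInline(true, 16, 2, false, 64) -> false (libcall).
// ---------------------------------------------------------------------------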
- for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) - NumElementsForVT[i] = 1; + // Everything defaults to needing one register. + for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) { + NumRegistersForVT[i] = 1; + RegisterTypeForVT[i] = TransformToType[i] = i; + } + // ...except isVoid, which doesn't need any registers. + NumRegistersForVT[MVT::isVoid] = 0; // Find the largest integer register class. unsigned LargestIntReg = MVT::i128; @@ -224,56 +274,74 @@ void TargetLowering::computeRegisterProperties() { // Every integer value type larger than this largest register takes twice as // many registers to represent as the previous ValueType. - unsigned ExpandedReg = LargestIntReg; ++LargestIntReg; - for (++ExpandedReg; MVT::isInteger((MVT::ValueType)ExpandedReg);++ExpandedReg) - NumElementsForVT[ExpandedReg] = 2*NumElementsForVT[ExpandedReg-1]; - - // Inspect all of the ValueType's possible, deciding how to process them. - for (unsigned IntReg = MVT::i1; IntReg <= MVT::i128; ++IntReg) - // If we are expanding this type, expand it! - if (getNumElements((MVT::ValueType)IntReg) != 1) - SetValueTypeAction((MVT::ValueType)IntReg, Expand, *this, TransformToType, - ValueTypeActions); - else if (!isTypeLegal((MVT::ValueType)IntReg)) - // Otherwise, if we don't have native support, we must promote to a - // larger type. - SetValueTypeAction((MVT::ValueType)IntReg, Promote, *this, - TransformToType, ValueTypeActions); - else - TransformToType[(MVT::ValueType)IntReg] = (MVT::ValueType)IntReg; - - // If the target does not have native F64 support, expand it to I64. We will - // be generating soft float library calls. If the target does not have native - // support for F32, promote it to F64 if it is legal. Otherwise, expand it to - // I32. - if (isTypeLegal(MVT::f64)) - TransformToType[MVT::f64] = MVT::f64; - else { - NumElementsForVT[MVT::f64] = NumElementsForVT[MVT::i64]; - SetValueTypeAction(MVT::f64, Expand, *this, TransformToType, - ValueTypeActions); + for (MVT::ValueType ExpandedReg = LargestIntReg + 1; + MVT::isInteger(ExpandedReg); ++ExpandedReg) { + NumRegistersForVT[ExpandedReg] = 2*NumRegistersForVT[ExpandedReg-1]; + RegisterTypeForVT[ExpandedReg] = LargestIntReg; + TransformToType[ExpandedReg] = ExpandedReg - 1; + ValueTypeActions.setTypeAction(ExpandedReg, Expand); + } + + // Inspect all of the ValueType's smaller than the largest integer + // register to see which ones need promotion. + MVT::ValueType LegalIntReg = LargestIntReg; + for (MVT::ValueType IntReg = LargestIntReg - 1; + IntReg >= MVT::i1; --IntReg) { + if (isTypeLegal(IntReg)) { + LegalIntReg = IntReg; + } else { + RegisterTypeForVT[IntReg] = TransformToType[IntReg] = LegalIntReg; + ValueTypeActions.setTypeAction(IntReg, Promote); + } } - if (isTypeLegal(MVT::f32)) - TransformToType[MVT::f32] = MVT::f32; - else if (isTypeLegal(MVT::f64)) - SetValueTypeAction(MVT::f32, Promote, *this, TransformToType, - ValueTypeActions); - else { - NumElementsForVT[MVT::f32] = NumElementsForVT[MVT::i32]; - SetValueTypeAction(MVT::f32, Expand, *this, TransformToType, - ValueTypeActions); + + // ppcf128 type is really two f64's. + if (!isTypeLegal(MVT::ppcf128)) { + NumRegistersForVT[MVT::ppcf128] = 2*NumRegistersForVT[MVT::f64]; + RegisterTypeForVT[MVT::ppcf128] = MVT::f64; + TransformToType[MVT::ppcf128] = MVT::f64; + ValueTypeActions.setTypeAction(MVT::ppcf128, Expand); + } + + // Decide how to handle f64. If the target does not have native f64 support, + // expand it to i64 and we will be generating soft float library calls. 
+ if (!isTypeLegal(MVT::f64)) { + NumRegistersForVT[MVT::f64] = NumRegistersForVT[MVT::i64]; + RegisterTypeForVT[MVT::f64] = RegisterTypeForVT[MVT::i64]; + TransformToType[MVT::f64] = MVT::i64; + ValueTypeActions.setTypeAction(MVT::f64, Expand); + } + + // Decide how to handle f32. If the target does not have native support for + // f32, promote it to f64 if it is legal. Otherwise, expand it to i32. + if (!isTypeLegal(MVT::f32)) { + if (isTypeLegal(MVT::f64)) { + NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::f64]; + RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::f64]; + TransformToType[MVT::f32] = MVT::f64; + ValueTypeActions.setTypeAction(MVT::f32, Promote); + } else { + NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::i32]; + RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::i32]; + TransformToType[MVT::f32] = MVT::i32; + ValueTypeActions.setTypeAction(MVT::f32, Expand); + } } - // Set MVT::Vector to always be Expanded - SetValueTypeAction(MVT::Vector, Expand, *this, TransformToType, - ValueTypeActions); - - // Loop over all of the legal vector value types, specifying an identity type - // transformation. - for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE; + // Loop over all of the vector value types to see which need transformations. + for (MVT::ValueType i = MVT::FIRST_VECTOR_VALUETYPE; i <= MVT::LAST_VECTOR_VALUETYPE; ++i) { - if (isTypeLegal((MVT::ValueType)i)) - TransformToType[i] = (MVT::ValueType)i; + if (!isTypeLegal(i)) { + MVT::ValueType IntermediateVT, RegisterVT; + unsigned NumIntermediates; + NumRegistersForVT[i] = + getVectorTypeBreakdown(i, + IntermediateVT, NumIntermediates, + RegisterVT); + RegisterTypeForVT[i] = RegisterVT; + TransformToType[i] = MVT::Other; // this isn't actually used + ValueTypeActions.setTypeAction(i, Expand); + } } } @@ -281,41 +349,52 @@ const char *TargetLowering::getTargetNodeName(unsigned Opcode) const { return NULL; } -/// getVectorTypeBreakdown - Packed types are broken down into some number of -/// legal first class types. For example, <8 x float> maps to 2 MVT::v4f32 +/// getVectorTypeBreakdown - Vector types are broken down into some number of +/// legal first class types. For example, MVT::v8f32 maps to 2 MVT::v4f32 /// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack. +/// Similarly, MVT::v2i64 turns into 4 MVT::i32 values with both PPC and X86. /// -/// This method returns the number and type of the resultant breakdown. +/// This method returns the number of registers needed, and the VT for each +/// register. It also returns the VT and quantity of the intermediate values +/// before they are promoted/expanded. /// -unsigned TargetLowering::getVectorTypeBreakdown(const VectorType *PTy, - MVT::ValueType &PTyElementVT, - MVT::ValueType &PTyLegalElementVT) const { +unsigned TargetLowering::getVectorTypeBreakdown(MVT::ValueType VT, + MVT::ValueType &IntermediateVT, + unsigned &NumIntermediates, + MVT::ValueType &RegisterVT) const { // Figure out the right, legal destination reg to copy into. - unsigned NumElts = PTy->getNumElements(); - MVT::ValueType EltTy = getValueType(PTy->getElementType()); + unsigned NumElts = MVT::getVectorNumElements(VT); + MVT::ValueType EltTy = MVT::getVectorElementType(VT); unsigned NumVectorRegs = 1; + // FIXME: We don't support non-power-of-2-sized vectors for now. Ideally we + // could break down into LHS/RHS like LegalizeDAG does. + if (!isPowerOf2_32(NumElts)) { + NumVectorRegs = NumElts; + NumElts = 1; + } + // Divide the input until we get to a supported size. 
This will always // end with a scalar if the target doesn't support vectors. - while (NumElts > 1 && !isTypeLegal(getVectorType(EltTy, NumElts))) { + while (NumElts > 1 && + !isTypeLegal(MVT::getVectorType(EltTy, NumElts))) { NumElts >>= 1; NumVectorRegs <<= 1; } - - MVT::ValueType VT; - if (NumElts == 1) { - VT = EltTy; - } else { - VT = getVectorType(EltTy, NumElts); - } - PTyElementVT = VT; - MVT::ValueType DestVT = getTypeToTransformTo(VT); - PTyLegalElementVT = DestVT; - if (DestVT < VT) { + NumIntermediates = NumVectorRegs; + + MVT::ValueType NewVT = MVT::getVectorType(EltTy, NumElts); + if (!isTypeLegal(NewVT)) + NewVT = EltTy; + IntermediateVT = NewVT; + + MVT::ValueType DestVT = getTypeToTransformTo(NewVT); + RegisterVT = DestVT; + if (DestVT < NewVT) { // Value is expanded, e.g. i64 -> i16. - return NumVectorRegs*(MVT::getSizeInBits(VT)/MVT::getSizeInBits(DestVT)); + return NumVectorRegs*(MVT::getSizeInBits(NewVT)/MVT::getSizeInBits(DestVT)); } else { // Otherwise, promotion or legal types use the same number of registers as // the vector decimated to the appropriate level. @@ -325,6 +404,13 @@ unsigned TargetLowering::getVectorTypeBreakdown(const VectorType *PTy, return 1; } +SDOperand TargetLowering::getPICJumpTableRelocBase(SDOperand Table, + SelectionDAG &DAG) const { + if (usesGlobalOffsetTable()) + return DAG.getNode(ISD::GLOBAL_OFFSET_TABLE, getPointerTy()); + return Table; +} + //===----------------------------------------------------------------------===// // Optimization Methods //===----------------------------------------------------------------------===// @@ -367,12 +453,17 @@ bool TargetLowering::SimplifyDemandedBits(SDOperand Op, uint64_t DemandedMask, TargetLoweringOpt &TLO, unsigned Depth) const { KnownZero = KnownOne = 0; // Don't know anything. + + // The masks are not wide enough to represent this type! Should use APInt. + if (Op.getValueType() == MVT::i128) + return false; + // Other users may use these bits. if (!Op.Val->hasOneUse()) { if (Depth != 0) { // If not at the root, Just compute the KnownZero/KnownOne bits to // simplify things downstream. - ComputeMaskedBits(Op, DemandedMask, KnownZero, KnownOne, Depth); + TLO.DAG.ComputeMaskedBits(Op, DemandedMask, KnownZero, KnownOne, Depth); return false; } // If this is the root being simplified, allow it to have multiple uses, @@ -401,8 +492,8 @@ bool TargetLowering::SimplifyDemandedBits(SDOperand Op, uint64_t DemandedMask, // the RHS. if (ConstantSDNode *RHSC = dyn_cast(Op.getOperand(1))) { uint64_t LHSZero, LHSOne; - ComputeMaskedBits(Op.getOperand(0), DemandedMask, - LHSZero, LHSOne, Depth+1); + TLO.DAG.ComputeMaskedBits(Op.getOperand(0), DemandedMask, + LHSZero, LHSOne, Depth+1); // If the LHS already has zeros where RHSC does, this and is dead. if ((LHSZero & DemandedMask) == (~RHSC->getValue() & DemandedMask)) return TLO.CombineTo(Op, Op.getOperand(0)); @@ -566,7 +657,32 @@ bool TargetLowering::SimplifyDemandedBits(SDOperand Op, uint64_t DemandedMask, break; case ISD::SHL: if (ConstantSDNode *SA = dyn_cast(Op.getOperand(1))) { - if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask >> SA->getValue(), + unsigned ShAmt = SA->getValue(); + SDOperand InOp = Op.getOperand(0); + + // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a + // single shift. We can do this if the bottom bits (which are shifted + // out) are never demanded. 
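        // (Editorial example: with C1 = 2 and ShAmt = 4, ((X >>u 2) << 4)
        //  only needs bits >= 4 of the result, so it folds to (X << 2);
        //  with C1 = 6 the residual shift flips to (X >>u 2).)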
+ if (InOp.getOpcode() == ISD::SRL && + isa(InOp.getOperand(1))) { + if (ShAmt && (DemandedMask & ((1ULL << ShAmt)-1)) == 0) { + unsigned C1 = cast(InOp.getOperand(1))->getValue(); + unsigned Opc = ISD::SHL; + int Diff = ShAmt-C1; + if (Diff < 0) { + Diff = -Diff; + Opc = ISD::SRL; + } + + SDOperand NewSA = + TLO.DAG.getConstant(Diff, Op.getOperand(1).getValueType()); + MVT::ValueType VT = Op.getValueType(); + return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, VT, + InOp.getOperand(0), NewSA)); + } + } + + if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask >> ShAmt, KnownZero, KnownOne, TLO, Depth+1)) return true; KnownZero <<= SA->getValue(); @@ -578,11 +694,33 @@ bool TargetLowering::SimplifyDemandedBits(SDOperand Op, uint64_t DemandedMask, if (ConstantSDNode *SA = dyn_cast(Op.getOperand(1))) { MVT::ValueType VT = Op.getValueType(); unsigned ShAmt = SA->getValue(); + uint64_t TypeMask = MVT::getIntVTBitMask(VT); + unsigned VTSize = MVT::getSizeInBits(VT); + SDOperand InOp = Op.getOperand(0); + + // If this is ((X << C1) >>u ShAmt), see if we can simplify this into a + // single shift. We can do this if the top bits (which are shifted out) + // are never demanded. + if (InOp.getOpcode() == ISD::SHL && + isa(InOp.getOperand(1))) { + if (ShAmt && (DemandedMask & (~0ULL << (VTSize-ShAmt))) == 0) { + unsigned C1 = cast(InOp.getOperand(1))->getValue(); + unsigned Opc = ISD::SRL; + int Diff = ShAmt-C1; + if (Diff < 0) { + Diff = -Diff; + Opc = ISD::SHL; + } + + SDOperand NewSA = + TLO.DAG.getConstant(Diff, Op.getOperand(1).getValueType()); + return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, VT, + InOp.getOperand(0), NewSA)); + } + } // Compute the new bits that are at the top now. - uint64_t TypeMask = MVT::getIntVTBitMask(VT); - if (SimplifyDemandedBits(Op.getOperand(0), - (DemandedMask << ShAmt) & TypeMask, + if (SimplifyDemandedBits(InOp, (DemandedMask << ShAmt) & TypeMask, KnownZero, KnownOne, TLO, Depth+1)) return true; assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); @@ -592,7 +730,7 @@ bool TargetLowering::SimplifyDemandedBits(SDOperand Op, uint64_t DemandedMask, KnownOne >>= ShAmt; uint64_t HighBits = (1ULL << ShAmt)-1; - HighBits <<= MVT::getSizeInBits(VT) - ShAmt; + HighBits <<= VTSize - ShAmt; KnownZero |= HighBits; // High bits known zero. } break; @@ -806,13 +944,39 @@ bool TargetLowering::SimplifyDemandedBits(SDOperand Op, uint64_t DemandedMask, KnownZero |= ~InMask & DemandedMask; break; } + case ISD::FGETSIGN: + // All bits are zero except the low bit. + KnownZero = MVT::getIntVTBitMask(Op.getValueType()) ^ 1; + break; + case ISD::BIT_CONVERT: +#if 0 + // If this is an FP->Int bitcast and if the sign bit is the only thing that + // is demanded, turn this into a FGETSIGN. + if (DemandedMask == MVT::getIntVTSignBit(Op.getValueType()) && + MVT::isFloatingPoint(Op.getOperand(0).getValueType()) && + !MVT::isVector(Op.getOperand(0).getValueType())) { + // Only do this xform if FGETSIGN is valid or if before legalize. + if (!TLO.AfterLegalize || + isOperationLegal(ISD::FGETSIGN, Op.getValueType())) { + // Make a FGETSIGN + SHL to move the sign bit into the appropriate + // place. We expect the SHL to be eliminated by other optimizations. 
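          // (Editorial example: for an f32 bitcast to i32 with only bit 31
          //  demanded, the result would be (shl (fgetsign X), 31), placing
          //  the 0/1 sign value in the sign-bit position.)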
+ SDOperand Sign = TLO.DAG.getNode(ISD::FGETSIGN, Op.getValueType(), + Op.getOperand(0)); + unsigned ShVal = MVT::getSizeInBits(Op.getValueType())-1; + SDOperand ShAmt = TLO.DAG.getConstant(ShVal, getShiftAmountTy()); + return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SHL, Op.getValueType(), + Sign, ShAmt)); + } + } +#endif + break; case ISD::ADD: case ISD::SUB: case ISD::INTRINSIC_WO_CHAIN: case ISD::INTRINSIC_W_CHAIN: case ISD::INTRINSIC_VOID: // Just use ComputeMaskedBits to compute output bits. - ComputeMaskedBits(Op, DemandedMask, KnownZero, KnownOne, Depth); + TLO.DAG.ComputeMaskedBits(Op, DemandedMask, KnownZero, KnownOne, Depth); break; } @@ -824,329 +988,6 @@ bool TargetLowering::SimplifyDemandedBits(SDOperand Op, uint64_t DemandedMask, return false; } -/// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use -/// this predicate to simplify operations downstream. Mask is known to be zero -/// for bits that V cannot have. -bool TargetLowering::MaskedValueIsZero(SDOperand Op, uint64_t Mask, - unsigned Depth) const { - uint64_t KnownZero, KnownOne; - ComputeMaskedBits(Op, Mask, KnownZero, KnownOne, Depth); - assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); - return (KnownZero & Mask) == Mask; -} - -/// ComputeMaskedBits - Determine which of the bits specified in Mask are -/// known to be either zero or one and return them in the KnownZero/KnownOne -/// bitsets. This code only analyzes bits in Mask, in order to short-circuit -/// processing. -void TargetLowering::ComputeMaskedBits(SDOperand Op, uint64_t Mask, - uint64_t &KnownZero, uint64_t &KnownOne, - unsigned Depth) const { - KnownZero = KnownOne = 0; // Don't know anything. - if (Depth == 6 || Mask == 0) - return; // Limit search depth. - - uint64_t KnownZero2, KnownOne2; - - switch (Op.getOpcode()) { - case ISD::Constant: - // We know all of the bits for a constant! - KnownOne = cast(Op)->getValue() & Mask; - KnownZero = ~KnownOne & Mask; - return; - case ISD::AND: - // If either the LHS or the RHS are Zero, the result is zero. - ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero, KnownOne, Depth+1); - Mask &= ~KnownZero; - ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero2, KnownOne2, Depth+1); - assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); - assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?"); - - // Output known-1 bits are only known if set in both the LHS & RHS. - KnownOne &= KnownOne2; - // Output known-0 are known to be clear if zero in either the LHS | RHS. - KnownZero |= KnownZero2; - return; - case ISD::OR: - ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero, KnownOne, Depth+1); - Mask &= ~KnownOne; - ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero2, KnownOne2, Depth+1); - assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); - assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?"); - - // Output known-0 bits are only known if clear in both the LHS & RHS. - KnownZero &= KnownZero2; - // Output known-1 are known to be set if set in either the LHS | RHS. - KnownOne |= KnownOne2; - return; - case ISD::XOR: { - ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero, KnownOne, Depth+1); - ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero2, KnownOne2, Depth+1); - assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); - assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?"); - - // Output known-0 bits are known if clear or set in both the LHS & RHS. 
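    // (Editorial example: if bit i is known zero in both inputs, or known
    //  one in both, the xor of bit i is provably zero -- the two terms
    //  combined below; a bit known zero on one side and known one on the
    //  other is provably one.)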
- uint64_t KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2); - // Output known-1 are known to be set if set in only one of the LHS, RHS. - KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2); - KnownZero = KnownZeroOut; - return; - } - case ISD::SELECT: - ComputeMaskedBits(Op.getOperand(2), Mask, KnownZero, KnownOne, Depth+1); - ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero2, KnownOne2, Depth+1); - assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); - assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?"); - - // Only known if known in both the LHS and RHS. - KnownOne &= KnownOne2; - KnownZero &= KnownZero2; - return; - case ISD::SELECT_CC: - ComputeMaskedBits(Op.getOperand(3), Mask, KnownZero, KnownOne, Depth+1); - ComputeMaskedBits(Op.getOperand(2), Mask, KnownZero2, KnownOne2, Depth+1); - assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); - assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?"); - - // Only known if known in both the LHS and RHS. - KnownOne &= KnownOne2; - KnownZero &= KnownZero2; - return; - case ISD::SETCC: - // If we know the result of a setcc has the top bits zero, use this info. - if (getSetCCResultContents() == TargetLowering::ZeroOrOneSetCCResult) - KnownZero |= (MVT::getIntVTBitMask(Op.getValueType()) ^ 1ULL); - return; - case ISD::SHL: - // (shl X, C1) & C2 == 0 iff (X & C2 >>u C1) == 0 - if (ConstantSDNode *SA = dyn_cast(Op.getOperand(1))) { - ComputeMaskedBits(Op.getOperand(0), Mask >> SA->getValue(), - KnownZero, KnownOne, Depth+1); - assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); - KnownZero <<= SA->getValue(); - KnownOne <<= SA->getValue(); - KnownZero |= (1ULL << SA->getValue())-1; // low bits known zero. - } - return; - case ISD::SRL: - // (ushr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0 - if (ConstantSDNode *SA = dyn_cast(Op.getOperand(1))) { - MVT::ValueType VT = Op.getValueType(); - unsigned ShAmt = SA->getValue(); - - uint64_t TypeMask = MVT::getIntVTBitMask(VT); - ComputeMaskedBits(Op.getOperand(0), (Mask << ShAmt) & TypeMask, - KnownZero, KnownOne, Depth+1); - assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); - KnownZero &= TypeMask; - KnownOne &= TypeMask; - KnownZero >>= ShAmt; - KnownOne >>= ShAmt; - - uint64_t HighBits = (1ULL << ShAmt)-1; - HighBits <<= MVT::getSizeInBits(VT)-ShAmt; - KnownZero |= HighBits; // High bits known zero. - } - return; - case ISD::SRA: - if (ConstantSDNode *SA = dyn_cast(Op.getOperand(1))) { - MVT::ValueType VT = Op.getValueType(); - unsigned ShAmt = SA->getValue(); - - // Compute the new bits that are at the top now. - uint64_t TypeMask = MVT::getIntVTBitMask(VT); - - uint64_t InDemandedMask = (Mask << ShAmt) & TypeMask; - // If any of the demanded bits are produced by the sign extension, we also - // demand the input sign bit. - uint64_t HighBits = (1ULL << ShAmt)-1; - HighBits <<= MVT::getSizeInBits(VT) - ShAmt; - if (HighBits & Mask) - InDemandedMask |= MVT::getIntVTSignBit(VT); - - ComputeMaskedBits(Op.getOperand(0), InDemandedMask, KnownZero, KnownOne, - Depth+1); - assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); - KnownZero &= TypeMask; - KnownOne &= TypeMask; - KnownZero >>= ShAmt; - KnownOne >>= ShAmt; - - // Handle the sign bits. - uint64_t SignBit = MVT::getIntVTSignBit(VT); - SignBit >>= ShAmt; // Adjust to where it is now in the mask. - - if (KnownZero & SignBit) { - KnownZero |= HighBits; // New bits are known zero. 
- } else if (KnownOne & SignBit) { - KnownOne |= HighBits; // New bits are known one. - } - } - return; - case ISD::SIGN_EXTEND_INREG: { - MVT::ValueType EVT = cast(Op.getOperand(1))->getVT(); - - // Sign extension. Compute the demanded bits in the result that are not - // present in the input. - uint64_t NewBits = ~MVT::getIntVTBitMask(EVT) & Mask; - - uint64_t InSignBit = MVT::getIntVTSignBit(EVT); - int64_t InputDemandedBits = Mask & MVT::getIntVTBitMask(EVT); - - // If the sign extended bits are demanded, we know that the sign - // bit is demanded. - if (NewBits) - InputDemandedBits |= InSignBit; - - ComputeMaskedBits(Op.getOperand(0), InputDemandedBits, - KnownZero, KnownOne, Depth+1); - assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); - - // If the sign bit of the input is known set or clear, then we know the - // top bits of the result. - if (KnownZero & InSignBit) { // Input sign bit known clear - KnownZero |= NewBits; - KnownOne &= ~NewBits; - } else if (KnownOne & InSignBit) { // Input sign bit known set - KnownOne |= NewBits; - KnownZero &= ~NewBits; - } else { // Input sign bit unknown - KnownZero &= ~NewBits; - KnownOne &= ~NewBits; - } - return; - } - case ISD::CTTZ: - case ISD::CTLZ: - case ISD::CTPOP: { - MVT::ValueType VT = Op.getValueType(); - unsigned LowBits = Log2_32(MVT::getSizeInBits(VT))+1; - KnownZero = ~((1ULL << LowBits)-1) & MVT::getIntVTBitMask(VT); - KnownOne = 0; - return; - } - case ISD::LOAD: { - if (ISD::isZEXTLoad(Op.Val)) { - LoadSDNode *LD = cast(Op); - MVT::ValueType VT = LD->getLoadedVT(); - KnownZero |= ~MVT::getIntVTBitMask(VT) & Mask; - } - return; - } - case ISD::ZERO_EXTEND: { - uint64_t InMask = MVT::getIntVTBitMask(Op.getOperand(0).getValueType()); - uint64_t NewBits = (~InMask) & Mask; - ComputeMaskedBits(Op.getOperand(0), Mask & InMask, KnownZero, - KnownOne, Depth+1); - KnownZero |= NewBits & Mask; - KnownOne &= ~NewBits; - return; - } - case ISD::SIGN_EXTEND: { - MVT::ValueType InVT = Op.getOperand(0).getValueType(); - unsigned InBits = MVT::getSizeInBits(InVT); - uint64_t InMask = MVT::getIntVTBitMask(InVT); - uint64_t InSignBit = 1ULL << (InBits-1); - uint64_t NewBits = (~InMask) & Mask; - uint64_t InDemandedBits = Mask & InMask; - - // If any of the sign extended bits are demanded, we know that the sign - // bit is demanded. - if (NewBits & Mask) - InDemandedBits |= InSignBit; - - ComputeMaskedBits(Op.getOperand(0), InDemandedBits, KnownZero, - KnownOne, Depth+1); - // If the sign bit is known zero or one, the top bits match. - if (KnownZero & InSignBit) { - KnownZero |= NewBits; - KnownOne &= ~NewBits; - } else if (KnownOne & InSignBit) { - KnownOne |= NewBits; - KnownZero &= ~NewBits; - } else { // Otherwise, top bits aren't known. 
- KnownOne &= ~NewBits; - KnownZero &= ~NewBits; - } - return; - } - case ISD::ANY_EXTEND: { - MVT::ValueType VT = Op.getOperand(0).getValueType(); - ComputeMaskedBits(Op.getOperand(0), Mask & MVT::getIntVTBitMask(VT), - KnownZero, KnownOne, Depth+1); - return; - } - case ISD::TRUNCATE: { - ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero, KnownOne, Depth+1); - assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); - uint64_t OutMask = MVT::getIntVTBitMask(Op.getValueType()); - KnownZero &= OutMask; - KnownOne &= OutMask; - break; - } - case ISD::AssertZext: { - MVT::ValueType VT = cast(Op.getOperand(1))->getVT(); - uint64_t InMask = MVT::getIntVTBitMask(VT); - ComputeMaskedBits(Op.getOperand(0), Mask & InMask, KnownZero, - KnownOne, Depth+1); - KnownZero |= (~InMask) & Mask; - return; - } - case ISD::ADD: { - // If either the LHS or the RHS are Zero, the result is zero. - ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero, KnownOne, Depth+1); - ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero2, KnownOne2, Depth+1); - assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); - assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?"); - - // Output known-0 bits are known if clear or set in both the low clear bits - // common to both LHS & RHS. For example, 8+(X<<3) is known to have the - // low 3 bits clear. - uint64_t KnownZeroOut = std::min(CountTrailingZeros_64(~KnownZero), - CountTrailingZeros_64(~KnownZero2)); - - KnownZero = (1ULL << KnownZeroOut) - 1; - KnownOne = 0; - return; - } - case ISD::SUB: { - ConstantSDNode *CLHS = dyn_cast(Op.getOperand(0)); - if (!CLHS) return; - - // We know that the top bits of C-X are clear if X contains less bits - // than C (i.e. no wrap-around can happen). For example, 20-X is - // positive if we can prove that X is >= 0 and < 16. - MVT::ValueType VT = CLHS->getValueType(0); - if ((CLHS->getValue() & MVT::getIntVTSignBit(VT)) == 0) { // sign bit clear - unsigned NLZ = CountLeadingZeros_64(CLHS->getValue()+1); - uint64_t MaskV = (1ULL << (63-NLZ))-1; // NLZ can't be 64 with no sign bit - MaskV = ~MaskV & MVT::getIntVTBitMask(VT); - ComputeMaskedBits(Op.getOperand(1), MaskV, KnownZero, KnownOne, Depth+1); - - // If all of the MaskV bits are known to be zero, then we know the output - // top bits are zero, because we now know that the output is from [0-C]. - if ((KnownZero & MaskV) == MaskV) { - unsigned NLZ2 = CountLeadingZeros_64(CLHS->getValue()); - KnownZero = ~((1ULL << (64-NLZ2))-1) & Mask; // Top bits known zero. - KnownOne = 0; // No one bits known. - } else { - KnownZero = KnownOne = 0; // Otherwise, nothing known. - } - } - return; - } - default: - // Allow the target to implement this method for its nodes. - if (Op.getOpcode() >= ISD::BUILTIN_OP_END) { - case ISD::INTRINSIC_WO_CHAIN: - case ISD::INTRINSIC_W_CHAIN: - case ISD::INTRINSIC_VOID: - computeMaskedBitsForTargetNode(Op, Mask, KnownZero, KnownOne); - } - return; - } -} - /// computeMaskedBitsForTargetNode - Determine which of the bits specified /// in Mask are known to be either zero or one and return them in the /// KnownZero/KnownOne bitsets. 
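// (Editorial sketch, not in the patch: a target override of this hook would
//  pattern-match its custom nodes; e.g. a node known to produce only 0 or 1
//  could report KnownZero = ~1ULL & Mask and KnownOne = 0.)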
@@ -1154,6 +995,7 @@ void TargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op, uint64_t Mask, uint64_t &KnownZero, uint64_t &KnownOne, + const SelectionDAG &DAG, unsigned Depth) const { assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || @@ -1165,222 +1007,6 @@ void TargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op, KnownOne = 0; } -/// ComputeNumSignBits - Return the number of times the sign bit of the -/// register is replicated into the other bits. We know that at least 1 bit -/// is always equal to the sign bit (itself), but other cases can give us -/// information. For example, immediately after an "SRA X, 2", we know that -/// the top 3 bits are all equal to each other, so we return 3. -unsigned TargetLowering::ComputeNumSignBits(SDOperand Op, unsigned Depth) const{ - MVT::ValueType VT = Op.getValueType(); - assert(MVT::isInteger(VT) && "Invalid VT!"); - unsigned VTBits = MVT::getSizeInBits(VT); - unsigned Tmp, Tmp2; - - if (Depth == 6) - return 1; // Limit search depth. - - switch (Op.getOpcode()) { - default: break; - case ISD::AssertSext: - Tmp = MVT::getSizeInBits(cast(Op.getOperand(1))->getVT()); - return VTBits-Tmp+1; - case ISD::AssertZext: - Tmp = MVT::getSizeInBits(cast(Op.getOperand(1))->getVT()); - return VTBits-Tmp; - - case ISD::Constant: { - uint64_t Val = cast(Op)->getValue(); - // If negative, invert the bits, then look at it. - if (Val & MVT::getIntVTSignBit(VT)) - Val = ~Val; - - // Shift the bits so they are the leading bits in the int64_t. - Val <<= 64-VTBits; - - // Return # leading zeros. We use 'min' here in case Val was zero before - // shifting. We don't want to return '64' as for an i32 "0". - return std::min(VTBits, CountLeadingZeros_64(Val)); - } - - case ISD::SIGN_EXTEND: - Tmp = VTBits-MVT::getSizeInBits(Op.getOperand(0).getValueType()); - return ComputeNumSignBits(Op.getOperand(0), Depth+1) + Tmp; - - case ISD::SIGN_EXTEND_INREG: - // Max of the input and what this extends. - Tmp = MVT::getSizeInBits(cast(Op.getOperand(1))->getVT()); - Tmp = VTBits-Tmp+1; - - Tmp2 = ComputeNumSignBits(Op.getOperand(0), Depth+1); - return std::max(Tmp, Tmp2); - - case ISD::SRA: - Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1); - // SRA X, C -> adds C sign bits. - if (ConstantSDNode *C = dyn_cast(Op.getOperand(1))) { - Tmp += C->getValue(); - if (Tmp > VTBits) Tmp = VTBits; - } - return Tmp; - case ISD::SHL: - if (ConstantSDNode *C = dyn_cast(Op.getOperand(1))) { - // shl destroys sign bits. - Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1); - if (C->getValue() >= VTBits || // Bad shift. - C->getValue() >= Tmp) break; // Shifted all sign bits out. - return Tmp - C->getValue(); - } - break; - case ISD::AND: - case ISD::OR: - case ISD::XOR: // NOT is handled here. - // Logical binary ops preserve the number of sign bits. - Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1); - if (Tmp == 1) return 1; // Early out. - Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1); - return std::min(Tmp, Tmp2); - - case ISD::SELECT: - Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1); - if (Tmp == 1) return 1; // Early out. - Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1); - return std::min(Tmp, Tmp2); - - case ISD::SETCC: - // If setcc returns 0/-1, all bits are sign bits. 
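      // (Editorial example: a setcc producing 0 or -1 has every bit equal to
      //  the sign bit, so all VTBits bits count as sign bits here.)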
- if (getSetCCResultContents() == ZeroOrNegativeOneSetCCResult) - return VTBits; - break; - case ISD::ROTL: - case ISD::ROTR: - if (ConstantSDNode *C = dyn_cast(Op.getOperand(1))) { - unsigned RotAmt = C->getValue() & (VTBits-1); - - // Handle rotate right by N like a rotate left by 32-N. - if (Op.getOpcode() == ISD::ROTR) - RotAmt = (VTBits-RotAmt) & (VTBits-1); - - // If we aren't rotating out all of the known-in sign bits, return the - // number that are left. This handles rotl(sext(x), 1) for example. - Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1); - if (Tmp > RotAmt+1) return Tmp-RotAmt; - } - break; - case ISD::ADD: - // Add can have at most one carry bit. Thus we know that the output - // is, at worst, one more bit than the inputs. - Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1); - if (Tmp == 1) return 1; // Early out. - - // Special case decrementing a value (ADD X, -1): - if (ConstantSDNode *CRHS = dyn_cast(Op.getOperand(0))) - if (CRHS->isAllOnesValue()) { - uint64_t KnownZero, KnownOne; - uint64_t Mask = MVT::getIntVTBitMask(VT); - ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero, KnownOne, Depth+1); - - // If the input is known to be 0 or 1, the output is 0/-1, which is all - // sign bits set. - if ((KnownZero|1) == Mask) - return VTBits; - - // If we are subtracting one from a positive number, there is no carry - // out of the result. - if (KnownZero & MVT::getIntVTSignBit(VT)) - return Tmp; - } - - Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1); - if (Tmp2 == 1) return 1; - return std::min(Tmp, Tmp2)-1; - break; - - case ISD::SUB: - Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1); - if (Tmp2 == 1) return 1; - - // Handle NEG. - if (ConstantSDNode *CLHS = dyn_cast(Op.getOperand(0))) - if (CLHS->getValue() == 0) { - uint64_t KnownZero, KnownOne; - uint64_t Mask = MVT::getIntVTBitMask(VT); - ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero, KnownOne, Depth+1); - // If the input is known to be 0 or 1, the output is 0/-1, which is all - // sign bits set. - if ((KnownZero|1) == Mask) - return VTBits; - - // If the input is known to be positive (the sign bit is known clear), - // the output of the NEG has the same number of sign bits as the input. - if (KnownZero & MVT::getIntVTSignBit(VT)) - return Tmp2; - - // Otherwise, we treat this like a SUB. - } - - // Sub can have at most one carry bit. Thus we know that the output - // is, at worst, one more bit than the inputs. - Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1); - if (Tmp == 1) return 1; // Early out. - return std::min(Tmp, Tmp2)-1; - break; - case ISD::TRUNCATE: - // FIXME: it's tricky to do anything useful for this, but it is an important - // case for targets like X86. - break; - } - - // Handle LOADX separately here. EXTLOAD case will fallthrough. - if (Op.getOpcode() == ISD::LOAD) { - LoadSDNode *LD = cast(Op); - unsigned ExtType = LD->getExtensionType(); - switch (ExtType) { - default: break; - case ISD::SEXTLOAD: // '17' bits known - Tmp = MVT::getSizeInBits(LD->getLoadedVT()); - return VTBits-Tmp+1; - case ISD::ZEXTLOAD: // '16' bits known - Tmp = MVT::getSizeInBits(LD->getLoadedVT()); - return VTBits-Tmp; - } - } - - // Allow the target to implement this method for its nodes. 
-  if (Op.getOpcode() >= ISD::BUILTIN_OP_END ||
-      Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
-      Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
-      Op.getOpcode() == ISD::INTRINSIC_VOID) {
-    unsigned NumBits = ComputeNumSignBitsForTargetNode(Op, Depth);
-    if (NumBits > 1) return NumBits;
-  }
-
-  // Finally, if we can prove that the top bits of the result are 0's or 1's,
-  // use this information.
-  uint64_t KnownZero, KnownOne;
-  uint64_t Mask = MVT::getIntVTBitMask(VT);
-  ComputeMaskedBits(Op, Mask, KnownZero, KnownOne, Depth);
-
-  uint64_t SignBit = MVT::getIntVTSignBit(VT);
-  if (KnownZero & SignBit) {        // SignBit is 0
-    Mask = KnownZero;
-  } else if (KnownOne & SignBit) {  // SignBit is 1;
-    Mask = KnownOne;
-  } else {
-    // Nothing known.
-    return 1;
-  }
-
-  // Okay, we know that the sign bit in Mask is set.  Use CLZ to determine
-  // the number of identical bits in the top of the input value.
-  Mask ^= ~0ULL;
-  Mask <<= 64-VTBits;
-  // Return # leading zeros.  We use 'min' here in case Val was zero before
-  // shifting.  We don't want to return '64' as for an i32 "0".
-  return std::min(VTBits, CountLeadingZeros_64(Mask));
-}
-
-
-
 /// ComputeNumSignBitsForTargetNode - This method can be implemented by
 /// targets that want to expose additional information about sign bits to the
 /// DAG Combiner.
@@ -1539,7 +1165,8 @@ TargetLowering::SimplifySetCC(MVT::ValueType VT, SDOperand N0, SDOperand N1,
               cast<ConstantSDNode>(N0.getOperand(1))->getValue() == 1) {
             // If this is (X^1) == 0/1, swap the RHS and eliminate the xor.  We
             // can only do this if the top bits are known zero.
-            if (MaskedValueIsZero(N0, MVT::getIntVTBitMask(N0.getValueType())-1)){
+            if (DAG.MaskedValueIsZero(N0,
+                                      MVT::getIntVTBitMask(N0.getValueType())-1)){
              // Okay, get the un-inverted input value.
               SDOperand Val;
               if (N0.getOpcode() == ISD::XOR)
@@ -1653,6 +1280,28 @@ TargetLowering::SimplifySetCC(MVT::ValueType VT, SDOperand N0, SDOperand N1,
     // Constant fold or commute setcc.
     SDOperand O = DAG.FoldSetCC(VT, N0, N1, Cond);
     if (O.Val) return O;
+  } else if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1.Val)) {
+    // If the RHS of an FP comparison is a constant, simplify it away in
+    // some cases.
+    if (CFP->getValueAPF().isNaN()) {
+      // If an operand is known to be a NaN, we can fold it.
+      switch (ISD::getUnorderedFlavor(Cond)) {
+      default: assert(0 && "Unknown flavor!");
+      case 0:  // Known false.
+        return DAG.getConstant(0, VT);
+      case 1:  // Known true.
+        return DAG.getConstant(1, VT);
+      case 2:  // Undefined.
+        return DAG.getNode(ISD::UNDEF, VT);
+      }
+    }
+
+    // Otherwise, we know the RHS is not a NaN.  Simplify the node to drop the
+    // constant if knowing that the operand is non-NaN is enough.  We prefer to
+    // have SETO(x,x) instead of SETO(x, 0.0) because this avoids having to
+    // materialize 0.0.
+    if (Cond == ISD::SETO || Cond == ISD::SETUO)
+      return DAG.getSetCC(VT, N0, N0, Cond);
   }
 
   if (N0 == N1) {
@@ -1703,7 +1352,7 @@ TargetLowering::SimplifySetCC(MVT::ValueType VT, SDOperand N0, SDOperand N1,
         if (N0.getOpcode() == ISD::XOR)
           // If we know that all of the inverted bits are zero, don't bother
           // performing the inversion.
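          // (Editorial example: with LHSR = 1 and X known to have all bits
          //  above bit 0 clear, (X^1) == C folds to X == (C^1), avoiding an
          //  explicit NOT of X.)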
-          if (MaskedValueIsZero(N0.getOperand(0), ~LHSR->getValue()))
+          if (DAG.MaskedValueIsZero(N0.getOperand(0), ~LHSR->getValue()))
             return DAG.getSetCC(VT, N0.getOperand(0),
                                 DAG.getConstant(LHSR->getValue()^RHSC->getValue(),
                                                 N0.getValueType()), Cond);
@@ -1727,7 +1376,7 @@
       if (DAG.isCommutativeBinOp(N0.getOpcode()))
         return DAG.getSetCC(VT, N0.getOperand(0),
                             DAG.getConstant(0, N0.getValueType()), Cond);
-      else {
+      else if (N0.Val->hasOneUse()) {
         assert(N0.getOpcode() == ISD::SUB && "Unexpected operation!");
         // (Z-X) == X  --> Z == X<<1
         SDOperand SH = DAG.getNode(ISD::SHL, N1.getValueType(),
@@ -1750,7 +1399,7 @@
       if (DAG.isCommutativeBinOp(N1.getOpcode())) {
         return DAG.getSetCC(VT, N1.getOperand(0),
                             DAG.getConstant(0, N1.getValueType()), Cond);
-      } else {
+      } else if (N1.Val->hasOneUse()) {
         assert(N1.getOpcode() == ISD::SUB && "Unexpected operation!");
         // X == (Z-X)  --> X<<1 == Z
         SDOperand SH = DAG.getNode(ISD::SHL, N1.getValueType(), N0,
@@ -1860,34 +1509,62 @@ TargetLowering::getConstraintType(const std::string &Constraint) const {
   return C_Unknown;
 }
 
-/// isOperandValidForConstraint - Return the specified operand (possibly
-/// modified) if the specified SDOperand is valid for the specified target
-/// constraint letter, otherwise return null.
-SDOperand TargetLowering::isOperandValidForConstraint(SDOperand Op,
-                                                      char ConstraintLetter,
-                                                      SelectionDAG &DAG) {
+/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
+/// vector.  If it is invalid, don't add anything to Ops.
+void TargetLowering::LowerAsmOperandForConstraint(SDOperand Op,
+                                                  char ConstraintLetter,
+                                                  std::vector<SDOperand> &Ops,
+                                                  SelectionDAG &DAG) {
   switch (ConstraintLetter) {
   default: break;
+  case 'X':    // Allows any operand; labels (basic block) use this.
+    if (Op.getOpcode() == ISD::BasicBlock) {
+      Ops.push_back(Op);
+      return;
+    }
+    // fall through
   case 'i':    // Simple Integer or Relocatable Constant
   case 'n':    // Simple Integer
-  case 's':    // Relocatable Constant
-  case 'X':    // Allows any operand.
-    // These are okay if the operand is either a global variable address or a
-    // simple immediate value.  If we have one of these, map to the TargetXXX
-    // version so that the value itself doesn't get selected.
-    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
-      // Simple constants are not allowed for 's'.
-      if (ConstraintLetter != 's')
-        return DAG.getTargetConstant(C->getValue(), Op.getValueType());
+  case 's': {  // Relocatable Constant
+    // These operands are interested in values of the form (GV+C), where C may
+    // be folded in as an offset of GV, or it may be explicitly added.  Also, it
+    // is possible and fine if either GV or C are missing.
+    ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
+    GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op);
+
+    // If we have "(add GV, C)", pull out GV/C
+    if (Op.getOpcode() == ISD::ADD) {
+      C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
+      GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(0));
+      if (C == 0 || GA == 0) {
+        C = dyn_cast<ConstantSDNode>(Op.getOperand(0));
+        GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(1));
+      }
+      if (C == 0 || GA == 0)
+        C = 0, GA = 0;
     }
-    if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
-      if (ConstraintLetter != 'n')
-        return DAG.getTargetGlobalAddress(GA->getGlobal(), Op.getValueType(),
-                                          GA->getOffset());
+
+    // If we find a valid operand, map to the TargetXXX version so that the
+    // value itself doesn't get selected.
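      // (Editorial example: for constraint "i" with Op = (add &GV, 8), GA
      //  and C are pulled apart above and re-emitted below as a
      //  TargetGlobalAddress of GV with offset +8; a bare integer under "s"
      //  is rejected, while under "i"/"n" it becomes a TargetConstant.)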
+      if (GA) {   // Either &GV or &GV+C
+        if (ConstraintLetter != 'n') {
+          int64_t Offs = GA->getOffset();
+          if (C) Offs += C->getValue();
+          Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(),
+                                                   Op.getValueType(), Offs));
+          return;
+        }
+      }
+      if (C) {   // just C, no GV.
+        // Simple constants are not allowed for 's'.
+        if (ConstraintLetter != 's') {
+          Ops.push_back(DAG.getTargetConstant(C->getValue(), Op.getValueType()));
+          return;
+        }
     }
     break;
   }
-  return SDOperand(0,0);
+  }
 }
 
 std::vector<unsigned> TargetLowering::
@@ -1974,42 +1651,6 @@ bool TargetLowering::isLegalAddressingMode(const AddrMode &AM,
   return true;
 }
 
-/// isLegalAddressImmediate - Return true if the integer value can be used as
-/// the offset of the target addressing mode for load / store of the given
-/// type.
-bool TargetLowering::isLegalAddressImmediate(int64_t V, const Type *Ty) const {
-  return false;
-}
-
-/// isLegalAddressImmediate - Return true if the GlobalValue can be used as
-/// the offset of the target addressing mode.
-bool TargetLowering::isLegalAddressImmediate(GlobalValue *GV) const {
-  return false;
-}
-
-/// isLegalAddressScale - Return true if the integer value can be used as the
-/// scale of the target addressing mode for load / store of the given type.
-bool TargetLowering::isLegalAddressScale(int64_t S, const Type *Ty) const {
-  return false;
-}
-
-/// isLegalAddressScaleAndImm - Return true if S works for IsLegalAddressScale
-/// and V works for isLegalAddressImmediate _and_ both can be applied
-/// simultaneously to the same instruction.
-bool TargetLowering::isLegalAddressScaleAndImm(int64_t S, int64_t V,
-                                               const Type* Ty) const {
-  return false;
-}
-
-/// isLegalAddressScaleAndImm - Return true if S works for IsLegalAddressScale
-/// and GV works for isLegalAddressImmediate _and_ both can be applied
-/// simultaneously to the same instruction.
-bool TargetLowering::isLegalAddressScaleAndImm(int64_t S, GlobalValue *GV,
-                                               const Type* Ty) const {
-
-  return false;
-}
-
 // Magic for divide replacement
 
 struct ms {
@@ -2189,21 +1830,27 @@ static mu magicu64(uint64_t d)
 /// multiplying by a magic number.  See:
 ///
 SDOperand TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG,
-                                    std::vector<SDNode*>* Created) const {
+                                    std::vector<SDNode*>* Created) const {
   MVT::ValueType VT = N->getValueType(0);
 
   // Check to see if we can do this.
   if (!isTypeLegal(VT) || (VT != MVT::i32 && VT != MVT::i64))
     return SDOperand();       // BuildSDIV only operates on i32 or i64
-  if (!isOperationLegal(ISD::MULHS, VT))
-    return SDOperand();       // Make sure the target supports MULHS.
 
   int64_t d = cast<ConstantSDNode>(N->getOperand(1))->getSignExtended();
   ms magics = (VT == MVT::i32) ? magic32(d) : magic64(d);
 
   // Multiply the numerator (operand 0) by the magic value
-  SDOperand Q = DAG.getNode(ISD::MULHS, VT, N->getOperand(0),
-                            DAG.getConstant(magics.m, VT));
+  SDOperand Q;
+  if (isOperationLegal(ISD::MULHS, VT))
+    Q = DAG.getNode(ISD::MULHS, VT, N->getOperand(0),
+                    DAG.getConstant(magics.m, VT));
+  else if (isOperationLegal(ISD::SMUL_LOHI, VT))
+    Q = SDOperand(DAG.getNode(ISD::SMUL_LOHI, DAG.getVTList(VT, VT),
+                              N->getOperand(0),
+                              DAG.getConstant(magics.m, VT)).Val, 1);
+  else
+    return SDOperand();       // No mulhs or equivalent
   // If d > 0 and m < 0, add the numerator
   if (d > 0 && magics.m < 0) {
     Q = DAG.getNode(ISD::ADD, VT, Q, N->getOperand(0));
@@ -2237,21 +1884,27 @@ SDOperand TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG,
 /// multiplying by a magic number.  See:
 ///
 SDOperand TargetLowering::BuildUDIV(SDNode *N, SelectionDAG &DAG,
-                                    std::vector<SDNode*>* Created) const {
+                                    std::vector<SDNode*>* Created) const {
   MVT::ValueType VT = N->getValueType(0);
 
   // Check to see if we can do this.
   if (!isTypeLegal(VT) || (VT != MVT::i32 && VT != MVT::i64))
     return SDOperand();       // BuildUDIV only operates on i32 or i64
-  if (!isOperationLegal(ISD::MULHU, VT))
-    return SDOperand();       // Make sure the target supports MULHU.
 
   uint64_t d = cast<ConstantSDNode>(N->getOperand(1))->getValue();
   mu magics = (VT == MVT::i32) ? magicu32(d) : magicu64(d);
 
   // Multiply the numerator (operand 0) by the magic value
-  SDOperand Q = DAG.getNode(ISD::MULHU, VT, N->getOperand(0),
-                            DAG.getConstant(magics.m, VT));
+  SDOperand Q;
+  if (isOperationLegal(ISD::MULHU, VT))
+    Q = DAG.getNode(ISD::MULHU, VT, N->getOperand(0),
+                    DAG.getConstant(magics.m, VT));
+  else if (isOperationLegal(ISD::UMUL_LOHI, VT))
+    Q = SDOperand(DAG.getNode(ISD::UMUL_LOHI, DAG.getVTList(VT, VT),
+                              N->getOperand(0),
+                              DAG.getConstant(magics.m, VT)).Val, 1);
+  else
+    return SDOperand();       // No mulhu or equivalent
 
   if (Created)
     Created->push_back(Q.Val);
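// --- Editorial appendix (not part of the patch) ----------------------------
// A self-contained check of the i32 expansions BuildUDIV and BuildSDIV emit,
// using the textbook magic constants magicu32(5) = 0xCCCCCCCD (post-shift 2)
// and magic32(7) = 0x92492493 (post-shift 2).  The constants are the classic
// Hacker's Delight values, assumed here rather than taken from this file.
#include <cassert>
#include <cstdint>

static uint32_t udiv_by_5(uint32_t N) {
  uint32_t Q = (uint32_t)(((uint64_t)N * 0xCCCCCCCDull) >> 32); // MULHU
  return Q >> 2;                                                // srl by magics.s
}

static int32_t sdiv_by_7(int32_t N) {
  int32_t Q = (int32_t)(((int64_t)N * (int32_t)0x92492493) >> 32); // MULHS
  Q += N;                 // d > 0 && magics.m < 0 -> add the numerator
  Q >>= 2;                // arithmetic post-shift by magics.s
  return Q + (int32_t)((uint32_t)Q >> 31); // add 1 if the quotient is negative
}

int main() {
  for (int32_t i = -1000; i <= 1000; ++i) {
    assert(sdiv_by_7(i) == i / 7);
    if (i >= 0)
      assert(udiv_by_5((uint32_t)i) == (uint32_t)i / 5);
  }
  return 0;
}
// ----------------------------------------------------------------------------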