// This implements the SelectionDAG class.
//
//===----------------------------------------------------------------------===//
-
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Constants.h"
+#include "llvm/GlobalAlias.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Intrinsics.h"
#include "llvm/DerivedTypes.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Support/MathExtras.h"
-#include "llvm/Target/MRegisterInfo.h"
+#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
return Res;
}
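+
+/// MVTToAPFloatSemantics - Return the APFloat semantics that correspond to
+/// the given floating-point value type.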
+static const fltSemantics *MVTToAPFloatSemantics(MVT::ValueType VT) {
+ switch (VT) {
+ default: assert(0 && "Unknown FP format");
+ case MVT::f32: return &APFloat::IEEEsingle;
+ case MVT::f64: return &APFloat::IEEEdouble;
+ case MVT::f80: return &APFloat::x87DoubleExtended;
+ case MVT::f128: return &APFloat::IEEEquad;
+ case MVT::ppcf128: return &APFloat::PPCDoubleDouble;
+ }
+}
+
SelectionDAG::DAGUpdateListener::~DAGUpdateListener() {}
//===----------------------------------------------------------------------===//
bool ConstantFPSDNode::isValueValidForType(MVT::ValueType VT,
const APFloat& Val) {
+ assert(MVT::isFloatingPoint(VT) && "Can only convert between FP types");
+
+ // Anything can be extended to ppc long double.
+ if (VT == MVT::ppcf128)
+ return true;
+
+ // PPC long double cannot be shrunk to anything though.
+ if (&Val.getSemantics() == &APFloat::PPCDoubleDouble)
+ return false;
+
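+  // The conversion below reports opOK only when Val is exactly
+  // representable in VT; e.g. 0.5 survives an f64->f32 conversion,
+  // while 1.0e300 overflows and is rejected.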
// convert modifies in place, so make a copy.
APFloat Val2 = APFloat(Val);
- switch (VT) {
- default:
- return false; // These can't be represented as floating point!
-
- // FIXME rounding mode needs to be more flexible
- case MVT::f32:
- return &Val2.getSemantics() == &APFloat::IEEEsingle ||
- Val2.convert(APFloat::IEEEsingle, APFloat::rmNearestTiesToEven) ==
- APFloat::opOK;
- case MVT::f64:
- return &Val2.getSemantics() == &APFloat::IEEEsingle ||
- &Val2.getSemantics() == &APFloat::IEEEdouble ||
- Val2.convert(APFloat::IEEEdouble, APFloat::rmNearestTiesToEven) ==
- APFloat::opOK;
- // TODO: Figure out how to test if we can use a shorter type instead!
- case MVT::f80:
- case MVT::f128:
- case MVT::ppcf128:
- return true;
- }
+ return Val2.convert(*MVTToAPFloatSemantics(VT),
+ APFloat::rmNearestTiesToEven) == APFloat::opOK;
}
//===----------------------------------------------------------------------===//
if (!cast<ConstantSDNode>(NotZero)->isAllOnesValue())
return false;
} else if (isa<ConstantFPSDNode>(NotZero)) {
- MVT::ValueType VT = NotZero.getValueType();
- if (VT== MVT::f64) {
- if (((cast<ConstantFPSDNode>(NotZero)->getValueAPF().
- convertToAPInt().getZExtValue())) != (uint64_t)-1)
- return false;
- } else {
- if ((uint32_t)cast<ConstantFPSDNode>(NotZero)->
- getValueAPF().convertToAPInt().getZExtValue() !=
- (uint32_t)-1)
- return false;
- }
+ if (!cast<ConstantFPSDNode>(NotZero)->getValueAPF().
+ convertToAPInt().isAllOnesValue())
+ return false;
} else
return false;
return true;
}
+/// isScalarToVector - Return true if the specified node is an
+/// ISD::SCALAR_TO_VECTOR node or a BUILD_VECTOR node where only the low
+/// element is not an undef.
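+/// For example, (BUILD_VECTOR X, undef, undef, undef) qualifies, while
+/// (BUILD_VECTOR undef, X, undef, undef) does not.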
+bool ISD::isScalarToVector(const SDNode *N) {
+ if (N->getOpcode() == ISD::SCALAR_TO_VECTOR)
+ return true;
+
+ if (N->getOpcode() != ISD::BUILD_VECTOR)
+ return false;
+ if (N->getOperand(0).getOpcode() == ISD::UNDEF)
+ return false;
+ unsigned NumElems = N->getNumOperands();
+ for (unsigned i = 1; i < NumElems; ++i) {
+ SDOperand V = N->getOperand(i);
+ if (V.getOpcode() != ISD::UNDEF)
+ return false;
+ }
+ return true;
+}
+
+
/// isDebugLabel - Return true if the specified node represents a debug
/// label (i.e. an ISD::LABEL or TargetInstrInfo::LABEL node whose third
/// operand is 0).
// Handle SDNode leafs with special info.
switch (N->getOpcode()) {
default: break; // Normal nodes don't need extra info.
+ case ISD::ARG_FLAGS:
+ ID.AddInteger(cast<ARG_FLAGSSDNode>(N)->getArgFlags().getRawBits());
+ break;
case ISD::TargetConstant:
case ISD::Constant:
- ID.AddInteger(cast<ConstantSDNode>(N)->getValue());
+ ID.Add(cast<ConstantSDNode>(N)->getAPIntValue());
break;
case ISD::TargetConstantFP:
case ISD::ConstantFP: {
- ID.AddAPFloat(cast<ConstantFPSDNode>(N)->getValueAPF());
+ ID.Add(cast<ConstantFPSDNode>(N)->getValueAPF());
break;
}
case ISD::TargetGlobalAddress:
// no cycles in the graph.
for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I) {
SDNode *Operand = I->Val;
- Operand->removeUser(N);
+ Operand->removeUser(std::distance(N->op_begin(), I), N);
// Now that we removed this operand, see if there are no uses of it left.
if (Operand->use_empty())
DeadNodes.push_back(Operand);
}
- if (N->OperandsNeedDelete)
+ if (N->OperandsNeedDelete) {
delete[] N->OperandList;
+ }
N->OperandList = 0;
N->NumOperands = 0;
// no cycles in the graph.
for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I) {
SDNode *Operand = I->Val;
- Operand->removeUser(N);
+ Operand->removeUser(std::distance(N->op_begin(), I), N);
// Now that we removed this operand, see if there are no uses of it left.
if (Operand->use_empty())
DeadNodes.push_back(Operand);
}
- if (N->OperandsNeedDelete)
+ if (N->OperandsNeedDelete) {
delete[] N->OperandList;
+ }
N->OperandList = 0;
N->NumOperands = 0;
// Drop all of the operands and decrement used nodes use counts.
for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I)
- I->Val->removeUser(N);
- if (N->OperandsNeedDelete)
+ I->Val->removeUser(std::distance(N->op_begin(), I), N);
+ if (N->OperandsNeedDelete) {
delete[] N->OperandList;
+ }
N->OperandList = 0;
N->NumOperands = 0;
while (!AllNodes.empty()) {
SDNode *N = AllNodes.begin();
N->SetNextInBucket(0);
- if (N->OperandsNeedDelete)
+ if (N->OperandsNeedDelete) {
delete [] N->OperandList;
+ }
N->OperandList = 0;
N->NumOperands = 0;
AllNodes.pop_front();
SDOperand SelectionDAG::getZeroExtendInReg(SDOperand Op, MVT::ValueType VT) {
if (Op.getValueType() == VT) return Op;
- int64_t Imm = ~0ULL >> (64-MVT::getSizeInBits(VT));
+ APInt Imm = APInt::getLowBitsSet(Op.getValueSizeInBits(),
+ MVT::getSizeInBits(VT));
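+  // e.g. for an i32 operand and VT = i8, Imm is 0x000000FF.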
return getNode(ISD::AND, Op.getValueType(), Op,
getConstant(Imm, Op.getValueType()));
}
unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
FoldingSetNodeID ID;
AddNodeIDNode(ID, Opc, getVTList(EltVT), 0, 0);
- ID.AddAPInt(Val);
+ ID.Add(Val);
void *IP = 0;
SDNode *N = NULL;
if ((N = CSEMap.FindNodeOrInsertPos(ID, IP)))
unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
FoldingSetNodeID ID;
AddNodeIDNode(ID, Opc, getVTList(EltVT), 0, 0);
- ID.AddAPFloat(V);
+ ID.Add(V);
void *IP = 0;
SDNode *N = NULL;
if ((N = CSEMap.FindNodeOrInsertPos(ID, IP)))
SDOperand SelectionDAG::getGlobalAddress(const GlobalValue *GV,
MVT::ValueType VT, int Offset,
bool isTargetGA) {
- const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
unsigned Opc;
+
+ const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
+ if (!GVar) {
+ // If GV is an alias then use the aliasee for determining thread-localness.
+ if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
+ GVar = dyn_cast_or_null<GlobalVariable>(GA->resolveAliasedGlobal());
+ }
+
if (GVar && GVar->isThreadLocal())
Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress;
else
Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;
+
FoldingSetNodeID ID;
AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
ID.AddPointer(GV);
return SDOperand(N, 0);
}
+SDOperand SelectionDAG::getArgFlags(ISD::ArgFlagsTy Flags) {
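+  // Arg-flag nodes are uniqued on their raw flag bits, so identical flag
+  // words always yield the same node.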
+ FoldingSetNodeID ID;
+ AddNodeIDNode(ID, ISD::ARG_FLAGS, getVTList(MVT::Other), 0, 0);
+ ID.AddInteger(Flags.getRawBits());
+ void *IP = 0;
+ if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
+ return SDOperand(E, 0);
+ SDNode *N = new ARG_FLAGSSDNode(Flags);
+ CSEMap.InsertNode(N, IP);
+ AllNodes.push_back(N);
+ return SDOperand(N, 0);
+}
+
SDOperand SelectionDAG::getValueType(MVT::ValueType VT) {
if (!MVT::isExtendedVT(VT) && (unsigned)VT >= ValueTypeNodes.size())
ValueTypeNodes.resize(VT+1);
}
if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.Val)) {
- uint64_t C2 = N2C->getValue();
+ const APInt &C2 = N2C->getAPIntValue();
if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.Val)) {
- uint64_t C1 = N1C->getValue();
-
- // Sign extend the operands if required
- if (ISD::isSignedIntSetCC(Cond)) {
- C1 = N1C->getSignExtended();
- C2 = N2C->getSignExtended();
- }
+ const APInt &C1 = N1C->getAPIntValue();
switch (Cond) {
default: assert(0 && "Unknown integer setcc!");
case ISD::SETEQ: return getConstant(C1 == C2, VT);
case ISD::SETNE: return getConstant(C1 != C2, VT);
- case ISD::SETULT: return getConstant(C1 < C2, VT);
- case ISD::SETUGT: return getConstant(C1 > C2, VT);
- case ISD::SETULE: return getConstant(C1 <= C2, VT);
- case ISD::SETUGE: return getConstant(C1 >= C2, VT);
- case ISD::SETLT: return getConstant((int64_t)C1 < (int64_t)C2, VT);
- case ISD::SETGT: return getConstant((int64_t)C1 > (int64_t)C2, VT);
- case ISD::SETLE: return getConstant((int64_t)C1 <= (int64_t)C2, VT);
- case ISD::SETGE: return getConstant((int64_t)C1 >= (int64_t)C2, VT);
+ case ISD::SETULT: return getConstant(C1.ult(C2), VT);
+ case ISD::SETUGT: return getConstant(C1.ugt(C2), VT);
+ case ISD::SETULE: return getConstant(C1.ule(C2), VT);
+ case ISD::SETUGE: return getConstant(C1.uge(C2), VT);
+ case ISD::SETLT: return getConstant(C1.slt(C2), VT);
+ case ISD::SETGT: return getConstant(C1.sgt(C2), VT);
+ case ISD::SETLE: return getConstant(C1.sle(C2), VT);
+ case ISD::SETGE: return getConstant(C1.sge(C2), VT);
}
}
}
- if (ConstantFPSDNode *N1C = dyn_cast<ConstantFPSDNode>(N1.Val))
+ if (ConstantFPSDNode *N1C = dyn_cast<ConstantFPSDNode>(N1.Val)) {
if (ConstantFPSDNode *N2C = dyn_cast<ConstantFPSDNode>(N2.Val)) {
// No compile time operations on this type yet.
if (N1C->getValueType(0) == MVT::ppcf128)
// Ensure that the constant occurs on the RHS.
return getSetCC(VT, N2, N1, ISD::getSetCCSwappedOperands(Cond));
}
-
+ }
+
// Could not fold it.
return SDOperand();
}
+/// SignBitIsZero - Return true if the sign bit of Op is known to be zero. We
+/// use this predicate to simplify operations downstream.
+bool SelectionDAG::SignBitIsZero(SDOperand Op, unsigned Depth) const {
+ unsigned BitWidth = Op.getValueSizeInBits();
+ return MaskedValueIsZero(Op, APInt::getSignBit(BitWidth), Depth);
+}
+
/// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use
/// this predicate to simplify operations downstream. Mask is known to be zero
/// for bits that V cannot have.
-bool SelectionDAG::MaskedValueIsZero(SDOperand Op, uint64_t Mask,
+bool SelectionDAG::MaskedValueIsZero(SDOperand Op, const APInt &Mask,
unsigned Depth) const {
- // The masks are not wide enough to represent this type! Should use APInt.
- if (Op.getValueType() == MVT::i128)
- return false;
-
- uint64_t KnownZero, KnownOne;
+ APInt KnownZero, KnownOne;
ComputeMaskedBits(Op, Mask, KnownZero, KnownOne, Depth);
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
return (KnownZero & Mask) == Mask;
/// known to be either zero or one and return them in the KnownZero/KnownOne
/// bitsets. This code only analyzes bits in Mask, in order to short-circuit
/// processing.
-void SelectionDAG::ComputeMaskedBits(SDOperand Op, uint64_t Mask,
- uint64_t &KnownZero, uint64_t &KnownOne,
+void SelectionDAG::ComputeMaskedBits(SDOperand Op, const APInt &Mask,
+ APInt &KnownZero, APInt &KnownOne,
unsigned Depth) const {
- KnownZero = KnownOne = 0; // Don't know anything.
+ unsigned BitWidth = Mask.getBitWidth();
+ assert(BitWidth == MVT::getSizeInBits(Op.getValueType()) &&
+ "Mask size mismatches value type size!");
+
+ KnownZero = KnownOne = APInt(BitWidth, 0); // Don't know anything.
if (Depth == 6 || Mask == 0)
return; // Limit search depth.
- // The masks are not wide enough to represent this type! Should use APInt.
- if (Op.getValueType() == MVT::i128)
- return;
-
- uint64_t KnownZero2, KnownOne2;
+ APInt KnownZero2, KnownOne2;
switch (Op.getOpcode()) {
case ISD::Constant:
// We know all of the bits for a constant!
- KnownOne = cast<ConstantSDNode>(Op)->getValue() & Mask;
+ KnownOne = cast<ConstantSDNode>(Op)->getAPIntValue() & Mask;
KnownZero = ~KnownOne & Mask;
return;
case ISD::AND:
// If either the LHS or the RHS are Zero, the result is zero.
ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero, KnownOne, Depth+1);
- Mask &= ~KnownZero;
- ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero2, KnownOne2, Depth+1);
+ ComputeMaskedBits(Op.getOperand(0), Mask & ~KnownZero,
+ KnownZero2, KnownOne2, Depth+1);
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
return;
case ISD::OR:
ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero, KnownOne, Depth+1);
- Mask &= ~KnownOne;
- ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero2, KnownOne2, Depth+1);
+ ComputeMaskedBits(Op.getOperand(0), Mask & ~KnownOne,
+ KnownZero2, KnownOne2, Depth+1);
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
// Output known-0 bits are known if clear or set in both the LHS & RHS.
- uint64_t KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
+ APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
// Output known-1 are known to be set if set in only one of the LHS, RHS.
KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
KnownZero = KnownZeroOut;
return;
case ISD::SETCC:
// If we know the result of a setcc has the top bits zero, use this info.
- if (TLI.getSetCCResultContents() == TargetLowering::ZeroOrOneSetCCResult)
- KnownZero |= (MVT::getIntVTBitMask(Op.getValueType()) ^ 1ULL);
+ if (TLI.getSetCCResultContents() == TargetLowering::ZeroOrOneSetCCResult &&
+ BitWidth > 1)
+ KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
return;
case ISD::SHL:
// (shl X, C1) & C2 == 0 iff (X & C2 >>u C1) == 0
if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
- ComputeMaskedBits(Op.getOperand(0), Mask >> SA->getValue(),
+ unsigned ShAmt = SA->getValue();
+
+ // If the shift count is an invalid immediate, don't do anything.
+ if (ShAmt >= BitWidth)
+ return;
+
+ ComputeMaskedBits(Op.getOperand(0), Mask.lshr(ShAmt),
KnownZero, KnownOne, Depth+1);
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
- KnownZero <<= SA->getValue();
- KnownOne <<= SA->getValue();
- KnownZero |= (1ULL << SA->getValue())-1; // low bits known zero.
+ KnownZero <<= ShAmt;
+ KnownOne <<= ShAmt;
+ // low bits known zero.
+ KnownZero |= APInt::getLowBitsSet(BitWidth, ShAmt);
}
return;
case ISD::SRL:
// (ushr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0
if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
- MVT::ValueType VT = Op.getValueType();
unsigned ShAmt = SA->getValue();
- uint64_t TypeMask = MVT::getIntVTBitMask(VT);
- ComputeMaskedBits(Op.getOperand(0), (Mask << ShAmt) & TypeMask,
+ // If the shift count is an invalid immediate, don't do anything.
+ if (ShAmt >= BitWidth)
+ return;
+
+ ComputeMaskedBits(Op.getOperand(0), (Mask << ShAmt),
KnownZero, KnownOne, Depth+1);
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
- KnownZero &= TypeMask;
- KnownOne &= TypeMask;
- KnownZero >>= ShAmt;
- KnownOne >>= ShAmt;
+ KnownZero = KnownZero.lshr(ShAmt);
+ KnownOne = KnownOne.lshr(ShAmt);
- uint64_t HighBits = (1ULL << ShAmt)-1;
- HighBits <<= MVT::getSizeInBits(VT)-ShAmt;
+ APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt) & Mask;
KnownZero |= HighBits; // High bits known zero.
}
return;
case ISD::SRA:
if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
- MVT::ValueType VT = Op.getValueType();
unsigned ShAmt = SA->getValue();
- // Compute the new bits that are at the top now.
- uint64_t TypeMask = MVT::getIntVTBitMask(VT);
+ // If the shift count is an invalid immediate, don't do anything.
+ if (ShAmt >= BitWidth)
+ return;
- uint64_t InDemandedMask = (Mask << ShAmt) & TypeMask;
+ APInt InDemandedMask = (Mask << ShAmt);
// If any of the demanded bits are produced by the sign extension, we also
// demand the input sign bit.
- uint64_t HighBits = (1ULL << ShAmt)-1;
- HighBits <<= MVT::getSizeInBits(VT) - ShAmt;
- if (HighBits & Mask)
- InDemandedMask |= MVT::getIntVTSignBit(VT);
+ APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt) & Mask;
+ if (HighBits.getBoolValue())
+ InDemandedMask |= APInt::getSignBit(BitWidth);
ComputeMaskedBits(Op.getOperand(0), InDemandedMask, KnownZero, KnownOne,
Depth+1);
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
- KnownZero &= TypeMask;
- KnownOne &= TypeMask;
- KnownZero >>= ShAmt;
- KnownOne >>= ShAmt;
+ KnownZero = KnownZero.lshr(ShAmt);
+ KnownOne = KnownOne.lshr(ShAmt);
// Handle the sign bits.
- uint64_t SignBit = MVT::getIntVTSignBit(VT);
- SignBit >>= ShAmt; // Adjust to where it is now in the mask.
+ APInt SignBit = APInt::getSignBit(BitWidth);
+ SignBit = SignBit.lshr(ShAmt); // Adjust to where it is now in the mask.
- if (KnownZero & SignBit) {
+ if (KnownZero.intersects(SignBit)) {
KnownZero |= HighBits; // New bits are known zero.
- } else if (KnownOne & SignBit) {
+ } else if (KnownOne.intersects(SignBit)) {
KnownOne |= HighBits; // New bits are known one.
}
}
return;
case ISD::SIGN_EXTEND_INREG: {
MVT::ValueType EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
+ unsigned EBits = MVT::getSizeInBits(EVT);
// Sign extension. Compute the demanded bits in the result that are not
// present in the input.
- uint64_t NewBits = ~MVT::getIntVTBitMask(EVT) & Mask;
+ APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - EBits) & Mask;
- uint64_t InSignBit = MVT::getIntVTSignBit(EVT);
- int64_t InputDemandedBits = Mask & MVT::getIntVTBitMask(EVT);
+ APInt InSignBit = APInt::getSignBit(EBits);
+ APInt InputDemandedBits = Mask & APInt::getLowBitsSet(BitWidth, EBits);
// If the sign extended bits are demanded, we know that the sign
// bit is demanded.
- if (NewBits)
+ InSignBit.zext(BitWidth);
+ if (NewBits.getBoolValue())
InputDemandedBits |= InSignBit;
ComputeMaskedBits(Op.getOperand(0), InputDemandedBits,
// If the sign bit of the input is known set or clear, then we know the
// top bits of the result.
- if (KnownZero & InSignBit) { // Input sign bit known clear
+ if (KnownZero.intersects(InSignBit)) { // Input sign bit known clear
KnownZero |= NewBits;
KnownOne &= ~NewBits;
- } else if (KnownOne & InSignBit) { // Input sign bit known set
+ } else if (KnownOne.intersects(InSignBit)) { // Input sign bit known set
KnownOne |= NewBits;
KnownZero &= ~NewBits;
} else { // Input sign bit unknown
case ISD::CTTZ:
case ISD::CTLZ:
case ISD::CTPOP: {
- MVT::ValueType VT = Op.getValueType();
- unsigned LowBits = Log2_32(MVT::getSizeInBits(VT))+1;
- KnownZero = ~((1ULL << LowBits)-1) & MVT::getIntVTBitMask(VT);
- KnownOne = 0;
+ unsigned LowBits = Log2_32(BitWidth)+1;
+ KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
+ KnownOne = APInt(BitWidth, 0);
return;
}
case ISD::LOAD: {
if (ISD::isZEXTLoad(Op.Val)) {
LoadSDNode *LD = cast<LoadSDNode>(Op);
MVT::ValueType VT = LD->getMemoryVT();
- KnownZero |= ~MVT::getIntVTBitMask(VT) & Mask;
+ unsigned MemBits = MVT::getSizeInBits(VT);
+ KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits) & Mask;
}
return;
}
case ISD::ZERO_EXTEND: {
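+    // Compute known bits in the narrower source, then widen the result;
+    // every demanded bit above InBits is known zero after a zero extension.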
- uint64_t InMask = MVT::getIntVTBitMask(Op.getOperand(0).getValueType());
- uint64_t NewBits = (~InMask) & Mask;
- ComputeMaskedBits(Op.getOperand(0), Mask & InMask, KnownZero,
- KnownOne, Depth+1);
- KnownZero |= NewBits & Mask;
- KnownOne &= ~NewBits;
+ MVT::ValueType InVT = Op.getOperand(0).getValueType();
+ unsigned InBits = MVT::getSizeInBits(InVT);
+ APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - InBits) & Mask;
+ APInt InMask = Mask;
+ InMask.trunc(InBits);
+ KnownZero.trunc(InBits);
+ KnownOne.trunc(InBits);
+ ComputeMaskedBits(Op.getOperand(0), InMask, KnownZero, KnownOne, Depth+1);
+ KnownZero.zext(BitWidth);
+ KnownOne.zext(BitWidth);
+ KnownZero |= NewBits;
return;
}
case ISD::SIGN_EXTEND: {
MVT::ValueType InVT = Op.getOperand(0).getValueType();
- unsigned InBits = MVT::getSizeInBits(InVT);
- uint64_t InMask = MVT::getIntVTBitMask(InVT);
- uint64_t InSignBit = 1ULL << (InBits-1);
- uint64_t NewBits = (~InMask) & Mask;
- uint64_t InDemandedBits = Mask & InMask;
+ unsigned InBits = MVT::getSizeInBits(InVT);
+ APInt InSignBit = APInt::getSignBit(InBits);
+ APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - InBits) & Mask;
+ APInt InMask = Mask;
+ InMask.trunc(InBits);
// If any of the sign extended bits are demanded, we know that the sign
- // bit is demanded.
- if (NewBits & Mask)
- InDemandedBits |= InSignBit;
-
- ComputeMaskedBits(Op.getOperand(0), InDemandedBits, KnownZero,
- KnownOne, Depth+1);
- // If the sign bit is known zero or one, the top bits match.
- if (KnownZero & InSignBit) {
+ // bit is demanded. Temporarily set this bit in the mask for our callee.
+ if (NewBits.getBoolValue())
+ InMask |= InSignBit;
+
+ KnownZero.trunc(InBits);
+ KnownOne.trunc(InBits);
+ ComputeMaskedBits(Op.getOperand(0), InMask, KnownZero, KnownOne, Depth+1);
+
+ // Note if the sign bit is known to be zero or one.
+ bool SignBitKnownZero = KnownZero.isNegative();
+ bool SignBitKnownOne = KnownOne.isNegative();
+ assert(!(SignBitKnownZero && SignBitKnownOne) &&
+ "Sign bit can't be known to be both zero and one!");
+
+ // If the sign bit wasn't actually demanded by our caller, we don't
+ // want it set in the KnownZero and KnownOne result values. Reset the
+ // mask and reapply it to the result values.
+ InMask = Mask;
+ InMask.trunc(InBits);
+ KnownZero &= InMask;
+ KnownOne &= InMask;
+
+ KnownZero.zext(BitWidth);
+ KnownOne.zext(BitWidth);
+
+ // If the sign bit is known zero or one, the top bits match.
+ if (SignBitKnownZero)
KnownZero |= NewBits;
- KnownOne &= ~NewBits;
- } else if (KnownOne & InSignBit) {
+ else if (SignBitKnownOne)
KnownOne |= NewBits;
- KnownZero &= ~NewBits;
- } else { // Otherwise, top bits aren't known.
- KnownOne &= ~NewBits;
- KnownZero &= ~NewBits;
- }
return;
}
case ISD::ANY_EXTEND: {
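+    // The high bits of an ANY_EXTEND are undefined, so only known bits of
+    // the narrow source survive; the widened high bits remain unknown.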
- MVT::ValueType VT = Op.getOperand(0).getValueType();
- ComputeMaskedBits(Op.getOperand(0), Mask & MVT::getIntVTBitMask(VT),
- KnownZero, KnownOne, Depth+1);
+ MVT::ValueType InVT = Op.getOperand(0).getValueType();
+ unsigned InBits = MVT::getSizeInBits(InVT);
+ APInt InMask = Mask;
+ InMask.trunc(InBits);
+ KnownZero.trunc(InBits);
+ KnownOne.trunc(InBits);
+ ComputeMaskedBits(Op.getOperand(0), InMask, KnownZero, KnownOne, Depth+1);
+ KnownZero.zext(BitWidth);
+ KnownOne.zext(BitWidth);
return;
}
case ISD::TRUNCATE: {
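+    // Widen the demanded mask to the source width, compute known bits
+    // there, then truncate the results back to the destination width.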
- ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero, KnownOne, Depth+1);
+ MVT::ValueType InVT = Op.getOperand(0).getValueType();
+ unsigned InBits = MVT::getSizeInBits(InVT);
+ APInt InMask = Mask;
+ InMask.zext(InBits);
+ KnownZero.zext(InBits);
+ KnownOne.zext(InBits);
+ ComputeMaskedBits(Op.getOperand(0), InMask, KnownZero, KnownOne, Depth+1);
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
- uint64_t OutMask = MVT::getIntVTBitMask(Op.getValueType());
- KnownZero &= OutMask;
- KnownOne &= OutMask;
+ KnownZero.trunc(BitWidth);
+ KnownOne.trunc(BitWidth);
break;
}
case ISD::AssertZext: {
MVT::ValueType VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
- uint64_t InMask = MVT::getIntVTBitMask(VT);
+ APInt InMask = APInt::getLowBitsSet(BitWidth, MVT::getSizeInBits(VT));
ComputeMaskedBits(Op.getOperand(0), Mask & InMask, KnownZero,
KnownOne, Depth+1);
KnownZero |= (~InMask) & Mask;
}
case ISD::FGETSIGN:
// All bits are zero except the low bit.
- KnownZero = MVT::getIntVTBitMask(Op.getValueType()) ^ 1;
+ KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - 1);
return;
case ISD::ADD: {
// Output known-0 bits are known if clear or set in both the low clear bits
// common to both LHS & RHS. For example, 8+(X<<3) is known to have the
// low 3 bits clear.
- uint64_t KnownZeroOut = std::min(CountTrailingZeros_64(~KnownZero),
- CountTrailingZeros_64(~KnownZero2));
+ unsigned KnownZeroOut = std::min(KnownZero.countTrailingOnes(),
+ KnownZero2.countTrailingOnes());
- KnownZero = (1ULL << KnownZeroOut) - 1;
- KnownOne = 0;
+ KnownZero = APInt::getLowBitsSet(BitWidth, KnownZeroOut);
+ KnownOne = APInt(BitWidth, 0);
return;
}
case ISD::SUB: {
      // We know that the top bits of C-X are clear if X contains fewer bits
      // than C (i.e. no wrap-around can happen).  For example, 20-X is
// positive if we can prove that X is >= 0 and < 16.
- MVT::ValueType VT = CLHS->getValueType(0);
- if ((CLHS->getValue() & MVT::getIntVTSignBit(VT)) == 0) { // sign bit clear
- unsigned NLZ = CountLeadingZeros_64(CLHS->getValue()+1);
- uint64_t MaskV = (1ULL << (63-NLZ))-1; // NLZ can't be 64 with no sign bit
- MaskV = ~MaskV & MVT::getIntVTBitMask(VT);
+ if (CLHS->getAPIntValue().isNonNegative()) {
+ unsigned NLZ = (CLHS->getAPIntValue()+1).countLeadingZeros();
+ // NLZ can't be BitWidth with no sign bit
+ APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
ComputeMaskedBits(Op.getOperand(1), MaskV, KnownZero, KnownOne, Depth+1);
// If all of the MaskV bits are known to be zero, then we know the output
// top bits are zero, because we now know that the output is from [0-C].
if ((KnownZero & MaskV) == MaskV) {
- unsigned NLZ2 = CountLeadingZeros_64(CLHS->getValue());
- KnownZero = ~((1ULL << (64-NLZ2))-1) & Mask; // Top bits known zero.
- KnownOne = 0; // No one bits known.
+ unsigned NLZ2 = CLHS->getAPIntValue().countLeadingZeros();
+ // Top bits known zero.
+ KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2) & Mask;
+ KnownOne = APInt(BitWidth, 0); // No one bits known.
} else {
- KnownZero = KnownOne = 0; // Otherwise, nothing known.
+ KnownZero = KnownOne = APInt(BitWidth, 0); // Otherwise, nothing known.
}
}
return;
return VTBits-Tmp;
case ISD::Constant: {
- uint64_t Val = cast<ConstantSDNode>(Op)->getValue();
- // If negative, invert the bits, then look at it.
- if (Val & MVT::getIntVTSignBit(VT))
- Val = ~Val;
-
- // Shift the bits so they are the leading bits in the int64_t.
- Val <<= 64-VTBits;
+ const APInt &Val = cast<ConstantSDNode>(Op)->getAPIntValue();
+ // If negative, return # leading ones.
+ if (Val.isNegative())
+ return Val.countLeadingOnes();
- // Return # leading zeros. We use 'min' here in case Val was zero before
- // shifting. We don't want to return '64' as for an i32 "0".
- return std::min(VTBits, CountLeadingZeros_64(Val));
+ // Return # leading zeros.
+ return Val.countLeadingZeros();
}
case ISD::SIGN_EXTEND:
// Special case decrementing a value (ADD X, -1):
if (ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(Op.getOperand(0)))
if (CRHS->isAllOnesValue()) {
- uint64_t KnownZero, KnownOne;
- uint64_t Mask = MVT::getIntVTBitMask(VT);
+ APInt KnownZero, KnownOne;
+ APInt Mask = APInt::getAllOnesValue(VTBits);
ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero, KnownOne, Depth+1);
// If the input is known to be 0 or 1, the output is 0/-1, which is all
// sign bits set.
- if ((KnownZero|1) == Mask)
+ if ((KnownZero | APInt(VTBits, 1)) == Mask)
return VTBits;
// If we are subtracting one from a positive number, there is no carry
// out of the result.
- if (KnownZero & MVT::getIntVTSignBit(VT))
+ if (KnownZero.isNegative())
return Tmp;
}
// Handle NEG.
if (ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0)))
- if (CLHS->getValue() == 0) {
- uint64_t KnownZero, KnownOne;
- uint64_t Mask = MVT::getIntVTBitMask(VT);
+ if (CLHS->isNullValue()) {
+ APInt KnownZero, KnownOne;
+ APInt Mask = APInt::getAllOnesValue(VTBits);
ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero, KnownOne, Depth+1);
// If the input is known to be 0 or 1, the output is 0/-1, which is all
// sign bits set.
- if ((KnownZero|1) == Mask)
+ if ((KnownZero | APInt(VTBits, 1)) == Mask)
return VTBits;
// If the input is known to be positive (the sign bit is known clear),
// the output of the NEG has the same number of sign bits as the input.
- if (KnownZero & MVT::getIntVTSignBit(VT))
+ if (KnownZero.isNegative())
return Tmp2;
// Otherwise, we treat this like a SUB.
// Finally, if we can prove that the top bits of the result are 0's or 1's,
// use this information.
- uint64_t KnownZero, KnownOne;
- uint64_t Mask = MVT::getIntVTBitMask(VT);
+ APInt KnownZero, KnownOne;
+ APInt Mask = APInt::getAllOnesValue(VTBits);
ComputeMaskedBits(Op, Mask, KnownZero, KnownOne, Depth);
- uint64_t SignBit = MVT::getIntVTSignBit(VT);
- if (KnownZero & SignBit) { // SignBit is 0
+ if (KnownZero.isNegative()) { // sign bit is 0
Mask = KnownZero;
- } else if (KnownOne & SignBit) { // SignBit is 1;
+ } else if (KnownOne.isNegative()) { // sign bit is 1;
Mask = KnownOne;
} else {
// Nothing known.
// Okay, we know that the sign bit in Mask is set. Use CLZ to determine
// the number of identical bits in the top of the input value.
- Mask ^= ~0ULL;
- Mask <<= 64-VTBits;
+ Mask = ~Mask;
+ Mask <<= Mask.getBitWidth()-VTBits;
// Return # leading zeros. We use 'min' here in case Val was zero before
// shifting. We don't want to return '64' as for an i32 "0".
- return std::min(VTBits, CountLeadingZeros_64(Mask));
+ return std::min(VTBits, Mask.countLeadingZeros());
}
SDOperand SelectionDAG::getNode(unsigned Opcode, MVT::ValueType VT,
SDOperand Operand) {
- unsigned Tmp1;
// Constant fold unary operations with an integer constant operand.
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand.Val)) {
- uint64_t Val = C->getValue();
+ const APInt &Val = C->getAPIntValue();
+ unsigned BitWidth = MVT::getSizeInBits(VT);
switch (Opcode) {
default: break;
- case ISD::SIGN_EXTEND: return getConstant(C->getSignExtended(), VT);
+ case ISD::SIGN_EXTEND:
+ return getConstant(APInt(Val).sextOrTrunc(BitWidth), VT);
case ISD::ANY_EXTEND:
- case ISD::ZERO_EXTEND: return getConstant(Val, VT);
- case ISD::TRUNCATE: return getConstant(Val, VT);
+ case ISD::ZERO_EXTEND:
+ case ISD::TRUNCATE:
+ return getConstant(APInt(Val).zextOrTrunc(BitWidth), VT);
case ISD::UINT_TO_FP:
case ISD::SINT_TO_FP: {
const uint64_t zero[] = {0, 0};
// No compile time operations on this type.
if (VT==MVT::ppcf128)
break;
- APFloat apf = APFloat(APInt(MVT::getSizeInBits(VT), 2, zero));
- (void)apf.convertFromZeroExtendedInteger(&Val,
- MVT::getSizeInBits(Operand.getValueType()),
- Opcode==ISD::SINT_TO_FP,
- APFloat::rmNearestTiesToEven);
+ APFloat apf = APFloat(APInt(BitWidth, 2, zero));
+ (void)apf.convertFromAPInt(Val,
+ Opcode==ISD::SINT_TO_FP,
+ APFloat::rmNearestTiesToEven);
return getConstantFP(apf, VT);
}
case ISD::BIT_CONVERT:
if (VT == MVT::f32 && C->getValueType(0) == MVT::i32)
- return getConstantFP(BitsToFloat(Val), VT);
+ return getConstantFP(Val.bitsToFloat(), VT);
else if (VT == MVT::f64 && C->getValueType(0) == MVT::i64)
- return getConstantFP(BitsToDouble(Val), VT);
+ return getConstantFP(Val.bitsToDouble(), VT);
break;
case ISD::BSWAP:
- switch(VT) {
- default: assert(0 && "Invalid bswap!"); break;
- case MVT::i16: return getConstant(ByteSwap_16((unsigned short)Val), VT);
- case MVT::i32: return getConstant(ByteSwap_32((unsigned)Val), VT);
- case MVT::i64: return getConstant(ByteSwap_64(Val), VT);
- }
- break;
+ return getConstant(Val.byteSwap(), VT);
case ISD::CTPOP:
- switch(VT) {
- default: assert(0 && "Invalid ctpop!"); break;
- case MVT::i1: return getConstant(Val != 0, VT);
- case MVT::i8:
- Tmp1 = (unsigned)Val & 0xFF;
- return getConstant(CountPopulation_32(Tmp1), VT);
- case MVT::i16:
- Tmp1 = (unsigned)Val & 0xFFFF;
- return getConstant(CountPopulation_32(Tmp1), VT);
- case MVT::i32:
- return getConstant(CountPopulation_32((unsigned)Val), VT);
- case MVT::i64:
- return getConstant(CountPopulation_64(Val), VT);
- }
+ return getConstant(Val.countPopulation(), VT);
case ISD::CTLZ:
- switch(VT) {
- default: assert(0 && "Invalid ctlz!"); break;
- case MVT::i1: return getConstant(Val == 0, VT);
- case MVT::i8:
- Tmp1 = (unsigned)Val & 0xFF;
- return getConstant(CountLeadingZeros_32(Tmp1)-24, VT);
- case MVT::i16:
- Tmp1 = (unsigned)Val & 0xFFFF;
- return getConstant(CountLeadingZeros_32(Tmp1)-16, VT);
- case MVT::i32:
- return getConstant(CountLeadingZeros_32((unsigned)Val), VT);
- case MVT::i64:
- return getConstant(CountLeadingZeros_64(Val), VT);
- }
+ return getConstant(Val.countLeadingZeros(), VT);
case ISD::CTTZ:
- switch(VT) {
- default: assert(0 && "Invalid cttz!"); break;
- case MVT::i1: return getConstant(Val == 0, VT);
- case MVT::i8:
- Tmp1 = (unsigned)Val | 0x100;
- return getConstant(CountTrailingZeros_32(Tmp1), VT);
- case MVT::i16:
- Tmp1 = (unsigned)Val | 0x10000;
- return getConstant(CountTrailingZeros_32(Tmp1), VT);
- case MVT::i32:
- return getConstant(CountTrailingZeros_32((unsigned)Val), VT);
- case MVT::i64:
- return getConstant(CountTrailingZeros_64(Val), VT);
- }
+ return getConstant(Val.countTrailingZeros(), VT);
}
}
case ISD::FP_EXTEND:
// This can return overflow, underflow, or inexact; we don't care.
// FIXME need to be more flexible about rounding mode.
- (void) V.convert(VT==MVT::f32 ? APFloat::IEEEsingle :
- VT==MVT::f64 ? APFloat::IEEEdouble :
- VT==MVT::f80 ? APFloat::x87DoubleExtended :
- VT==MVT::f128 ? APFloat::IEEEquad :
- APFloat::Bogus,
- APFloat::rmNearestTiesToEven);
+ (void)V.convert(*MVTToAPFloatSemantics(VT),
+ APFloat::rmNearestTiesToEven);
return getConstantFP(V, VT);
case ISD::FP_TO_SINT:
case ISD::FP_TO_UINT: {
assert(MVT::isFloatingPoint(VT) &&
MVT::isFloatingPoint(Operand.getValueType()) && "Invalid FP cast!");
if (Operand.getValueType() == VT) return Operand; // noop conversion.
+ if (Operand.getOpcode() == ISD::UNDEF)
+ return getNode(ISD::UNDEF, VT);
break;
- case ISD::SIGN_EXTEND:
+ case ISD::SIGN_EXTEND:
assert(MVT::isInteger(VT) && MVT::isInteger(Operand.getValueType()) &&
"Invalid SIGN_EXTEND!");
if (Operand.getValueType() == VT) return Operand; // noop extension
assert(MVT::isVector(VT) && !MVT::isVector(Operand.getValueType()) &&
MVT::getVectorElementType(VT) == Operand.getValueType() &&
"Illegal SCALAR_TO_VECTOR node!");
+ if (OpOpcode == ISD::UNDEF)
+ return getNode(ISD::UNDEF, VT);
+ // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined.
+ if (OpOpcode == ISD::EXTRACT_VECTOR_ELT &&
+ isa<ConstantSDNode>(Operand.getOperand(1)) &&
+ Operand.getConstantOperandVal(1) == 0 &&
+ Operand.getOperand(0).getValueType() == VT)
+ return Operand.getOperand(0);
break;
case ISD::FNEG:
if (OpOpcode == ISD::FSUB) // -(X-Y) -> (Y-X)
N1.getValueType() == VT && "Binary operator types must match!");
// (X & 0) -> 0. This commonly occurs when legalizing i64 values, so it's
// worth handling here.
- if (N2C && N2C->getValue() == 0)
+ if (N2C && N2C->isNullValue())
return N2;
if (N2C && N2C->isAllOnesValue()) // X & -1 -> X
return N1;
N1.getValueType() == VT && "Binary operator types must match!");
// (X ^| 0) -> X. This commonly occurs when legalizing i64 values, so it's
// worth handling here.
- if (N2C && N2C->getValue() == 0)
+ if (N2C && N2C->isNullValue())
return N1;
break;
case ISD::UDIV:
if (EVT == VT) return N1; // Not actually extending
if (N1C) {
- int64_t Val = N1C->getValue();
+ APInt Val = N1C->getAPIntValue();
unsigned FromBits = MVT::getSizeInBits(cast<VTSDNode>(N2)->getVT());
- Val <<= 64-FromBits;
- Val >>= 64-FromBits;
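+      // Shift the low FromBits up to the top, then arithmetic-shift back
+      // down to replicate the sign bit into the high bits.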
+ Val <<= Val.getBitWidth()-FromBits;
+ Val = Val.ashr(Val.getBitWidth()-FromBits);
return getConstant(Val, VT);
}
break;
case ISD::EXTRACT_VECTOR_ELT:
assert(N2C && "Bad EXTRACT_VECTOR_ELT!");
+ // EXTRACT_VECTOR_ELT of an UNDEF is an UNDEF.
+ if (N1.getOpcode() == ISD::UNDEF)
+ return getNode(ISD::UNDEF, VT);
+
// EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is
// expanding copies of large vectors from registers.
if (N1.getOpcode() == ISD::CONCAT_VECTORS &&
// expanding large vector constants.
if (N1.getOpcode() == ISD::BUILD_VECTOR)
return N1.getOperand(N2C->getValue());
-
+
// EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector
// operations are lowered to scalars.
if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT)
break;
case ISD::EXTRACT_ELEMENT:
assert(N2C && (unsigned)N2C->getValue() < 2 && "Bad EXTRACT_ELEMENT!");
-
+ assert(!MVT::isVector(N1.getValueType()) &&
+ MVT::isInteger(N1.getValueType()) &&
+ !MVT::isVector(VT) && MVT::isInteger(VT) &&
+ "EXTRACT_ELEMENT only applies to integers!");
+
// EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding
// 64-bit integers into 32-bit parts. Instead of building the extract of
// the BUILD_PAIR, only to have legalize rip it apart, just do it now.
if (N1.getOpcode() == ISD::BUILD_PAIR)
return N1.getOperand(N2C->getValue());
-
+
// EXTRACT_ELEMENT of a constant int is also very common.
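+    // e.g. extracting element 1 of the i64 0x1122334455667788 as an i32
+    // yields 0x11223344.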
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) {
- unsigned Shift = MVT::getSizeInBits(VT) * N2C->getValue();
- return getConstant(C->getValue() >> Shift, VT);
+ unsigned ElementSize = MVT::getSizeInBits(VT);
+ unsigned Shift = ElementSize * N2C->getValue();
+ APInt ShiftedVal = C->getAPIntValue().lshr(Shift);
+ return getConstant(ShiftedVal.trunc(ElementSize), VT);
}
break;
+ case ISD::EXTRACT_SUBVECTOR:
+ if (N1.getValueType() == VT) // Trivial extraction.
+ return N1;
+ break;
}
if (N1C) {
if (N2C) {
- uint64_t C1 = N1C->getValue(), C2 = N2C->getValue();
+ APInt C1 = N1C->getAPIntValue(), C2 = N2C->getAPIntValue();
switch (Opcode) {
case ISD::ADD: return getConstant(C1 + C2, VT);
case ISD::SUB: return getConstant(C1 - C2, VT);
case ISD::MUL: return getConstant(C1 * C2, VT);
case ISD::UDIV:
- if (C2) return getConstant(C1 / C2, VT);
+ if (C2.getBoolValue()) return getConstant(C1.udiv(C2), VT);
break;
case ISD::UREM :
- if (C2) return getConstant(C1 % C2, VT);
+ if (C2.getBoolValue()) return getConstant(C1.urem(C2), VT);
break;
case ISD::SDIV :
- if (C2) return getConstant(N1C->getSignExtended() /
- N2C->getSignExtended(), VT);
+ if (C2.getBoolValue()) return getConstant(C1.sdiv(C2), VT);
break;
case ISD::SREM :
- if (C2) return getConstant(N1C->getSignExtended() %
- N2C->getSignExtended(), VT);
+ if (C2.getBoolValue()) return getConstant(C1.srem(C2), VT);
break;
case ISD::AND : return getConstant(C1 & C2, VT);
case ISD::OR : return getConstant(C1 | C2, VT);
case ISD::XOR : return getConstant(C1 ^ C2, VT);
case ISD::SHL : return getConstant(C1 << C2, VT);
- case ISD::SRL : return getConstant(C1 >> C2, VT);
- case ISD::SRA : return getConstant(N1C->getSignExtended() >>(int)C2, VT);
- case ISD::ROTL :
- return getConstant((C1 << C2) | (C1 >> (MVT::getSizeInBits(VT) - C2)),
- VT);
- case ISD::ROTR :
- return getConstant((C1 >> C2) | (C1 << (MVT::getSizeInBits(VT) - C2)),
- VT);
+ case ISD::SRL : return getConstant(C1.lshr(C2), VT);
+ case ISD::SRA : return getConstant(C1.ashr(C2), VT);
+ case ISD::ROTL : return getConstant(C1.rotl(C2), VT);
+ case ISD::ROTR : return getConstant(C1.rotr(C2), VT);
default: break;
}
    } else {      // Canonicalize constant to RHS if commutative
// Fold a bunch of operators when the RHS is undef.
if (N2.getOpcode() == ISD::UNDEF) {
switch (Opcode) {
+ case ISD::XOR:
+ if (N1.getOpcode() == ISD::UNDEF)
+ // Handle undef ^ undef -> 0 special case. This is a common
+ // idiom (misuse).
+ return getConstant(0, VT);
+ // fallthrough
case ISD::ADD:
case ISD::ADDC:
case ISD::ADDE:
case ISD::SDIV:
case ISD::UREM:
case ISD::SREM:
- case ISD::XOR:
return N2; // fold op(arg1, undef) -> undef
case ISD::MUL:
case ISD::AND:
break;
}
case ISD::SELECT:
- if (N1C)
- if (N1C->getValue())
+ if (N1C) {
+ if (N1C->getValue())
return N2; // select true, X, Y -> X
else
return N3; // select false, X, Y -> Y
+ }
if (N2 == N3) return N2; // select C, X, X -> X
break;
case ISD::BRCOND:
- if (N2C)
+ if (N2C) {
if (N2C->getValue()) // Unconditional branch
return getNode(ISD::BR, MVT::Other, N1, N3);
else
return N1; // Never-taken branch
+ }
break;
case ISD::VECTOR_SHUFFLE:
assert(VT == N1.getValueType() && VT == N2.getValueType() &&
return getNode(ISD::MEMSET, MVT::Other, Ops, 6);
}
-SDOperand SelectionDAG::getLoad(MVT::ValueType VT,
- SDOperand Chain, SDOperand Ptr,
- const Value *SV, int SVOffset,
- bool isVolatile, unsigned Alignment) {
- if (Alignment == 0) { // Ensure that codegen never sees alignment 0
- const Type *Ty = 0;
- if (VT != MVT::iPTR) {
- Ty = MVT::getTypeForValueType(VT);
- } else if (SV) {
- const PointerType *PT = dyn_cast<PointerType>(SV->getType());
- assert(PT && "Value for load must be a pointer");
- Ty = PT->getElementType();
- }
- assert(Ty && "Could not get type information for load");
- Alignment = TLI.getTargetData()->getABITypeAlignment(Ty);
- }
- SDVTList VTs = getVTList(VT, MVT::Other);
- SDOperand Undef = getNode(ISD::UNDEF, Ptr.getValueType());
- SDOperand Ops[] = { Chain, Ptr, Undef };
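+/// getAtomic - Build an ISD::ATOMIC_LCS (compare-and-swap) node: the value
+/// at Ptr is compared with Cmp and, if they match, Swp is stored; the
+/// original memory value is returned along with a chain.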
+SDOperand SelectionDAG::getAtomic(unsigned Opcode, SDOperand Chain,
+ SDOperand Ptr, SDOperand Cmp,
+ SDOperand Swp, MVT::ValueType VT) {
+ assert(Opcode == ISD::ATOMIC_LCS && "Invalid Atomic Op");
+ assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");
+ SDVTList VTs = getVTList(Cmp.getValueType(), MVT::Other);
FoldingSetNodeID ID;
- AddNodeIDNode(ID, ISD::LOAD, VTs, Ops, 3);
- ID.AddInteger(ISD::UNINDEXED);
- ID.AddInteger(ISD::NON_EXTLOAD);
+ SDOperand Ops[] = {Chain, Ptr, Cmp, Swp};
+ AddNodeIDNode(ID, Opcode, VTs, Ops, 4);
ID.AddInteger((unsigned int)VT);
- ID.AddInteger(Alignment);
- ID.AddInteger(isVolatile);
- void *IP = 0;
+ void* IP = 0;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
return SDOperand(E, 0);
- SDNode *N = new LoadSDNode(Ops, VTs, ISD::UNINDEXED,
- ISD::NON_EXTLOAD, VT, SV, SVOffset, Alignment,
- isVolatile);
+ SDNode* N = new AtomicSDNode(Opcode, VTs, Chain, Ptr, Cmp, Swp, VT);
CSEMap.InsertNode(N, IP);
AllNodes.push_back(N);
return SDOperand(N, 0);
}
-SDOperand SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, MVT::ValueType VT,
- SDOperand Chain, SDOperand Ptr,
- const Value *SV,
- int SVOffset, MVT::ValueType EVT,
- bool isVolatile, unsigned Alignment) {
- // If they are asking for an extending load from/to the same thing, return a
- // normal load.
- if (VT == EVT)
- return getLoad(VT, Chain, Ptr, SV, SVOffset, isVolatile, Alignment);
-
- if (MVT::isVector(VT))
- assert(EVT == MVT::getVectorElementType(VT) && "Invalid vector extload!");
- else
- assert(MVT::getSizeInBits(EVT) < MVT::getSizeInBits(VT) &&
- "Should only be an extending load, not truncating!");
- assert((ExtType == ISD::EXTLOAD || MVT::isInteger(VT)) &&
- "Cannot sign/zero extend a FP/Vector load!");
- assert(MVT::isInteger(VT) == MVT::isInteger(EVT) &&
- "Cannot convert from FP to Int or Int -> FP!");
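+/// getAtomic - Build an ISD::ATOMIC_LAS (atomic load-add-store) or
+/// ISD::ATOMIC_SWAP (atomic exchange) node that combines Val with the
+/// value at Ptr and returns the original memory value plus a chain.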
+SDOperand SelectionDAG::getAtomic(unsigned Opcode, SDOperand Chain,
+ SDOperand Ptr, SDOperand Val,
+ MVT::ValueType VT) {
+ assert((Opcode == ISD::ATOMIC_LAS || Opcode == ISD::ATOMIC_SWAP)
+ && "Invalid Atomic Op");
+ SDVTList VTs = getVTList(Val.getValueType(), MVT::Other);
+ FoldingSetNodeID ID;
+ SDOperand Ops[] = {Chain, Ptr, Val};
+ AddNodeIDNode(ID, Opcode, VTs, Ops, 3);
+ ID.AddInteger((unsigned int)VT);
+ void* IP = 0;
+ if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
+ return SDOperand(E, 0);
+ SDNode* N = new AtomicSDNode(Opcode, VTs, Chain, Ptr, Val, VT);
+ CSEMap.InsertNode(N, IP);
+ AllNodes.push_back(N);
+ return SDOperand(N, 0);
+}
+SDOperand
+SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
+ MVT::ValueType VT, SDOperand Chain,
+ SDOperand Ptr, SDOperand Offset,
+ const Value *SV, int SVOffset, MVT::ValueType EVT,
+ bool isVolatile, unsigned Alignment) {
if (Alignment == 0) { // Ensure that codegen never sees alignment 0
const Type *Ty = 0;
if (VT != MVT::iPTR) {
const PointerType *PT = dyn_cast<PointerType>(SV->getType());
assert(PT && "Value for load must be a pointer");
Ty = PT->getElementType();
- }
+ }
assert(Ty && "Could not get type information for load");
Alignment = TLI.getTargetData()->getABITypeAlignment(Ty);
}
- SDVTList VTs = getVTList(VT, MVT::Other);
- SDOperand Undef = getNode(ISD::UNDEF, Ptr.getValueType());
- SDOperand Ops[] = { Chain, Ptr, Undef };
+
+ if (VT == EVT) {
+ ExtType = ISD::NON_EXTLOAD;
+ } else if (ExtType == ISD::NON_EXTLOAD) {
+ assert(VT == EVT && "Non-extending load from different memory type!");
+ } else {
+ // Extending load.
+ if (MVT::isVector(VT))
+ assert(EVT == MVT::getVectorElementType(VT) && "Invalid vector extload!");
+ else
+ assert(MVT::getSizeInBits(EVT) < MVT::getSizeInBits(VT) &&
+ "Should only be an extending load, not truncating!");
+ assert((ExtType == ISD::EXTLOAD || MVT::isInteger(VT)) &&
+ "Cannot sign/zero extend a FP/Vector load!");
+ assert(MVT::isInteger(VT) == MVT::isInteger(EVT) &&
+ "Cannot convert from FP to Int or Int -> FP!");
+ }
+
+ bool Indexed = AM != ISD::UNINDEXED;
+  assert((Indexed || Offset.getOpcode() == ISD::UNDEF) &&
+         "Unindexed load with an offset!");
+
+ SDVTList VTs = Indexed ?
+ getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other);
+ SDOperand Ops[] = { Chain, Ptr, Offset };
FoldingSetNodeID ID;
AddNodeIDNode(ID, ISD::LOAD, VTs, Ops, 3);
- ID.AddInteger(ISD::UNINDEXED);
+ ID.AddInteger(AM);
ID.AddInteger(ExtType);
ID.AddInteger((unsigned int)EVT);
ID.AddInteger(Alignment);
void *IP = 0;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
return SDOperand(E, 0);
- SDNode *N = new LoadSDNode(Ops, VTs, ISD::UNINDEXED, ExtType, EVT,
- SV, SVOffset, Alignment, isVolatile);
+ SDNode *N = new LoadSDNode(Ops, VTs, AM, ExtType, EVT, SV, SVOffset,
+ Alignment, isVolatile);
CSEMap.InsertNode(N, IP);
AllNodes.push_back(N);
return SDOperand(N, 0);
}
+SDOperand SelectionDAG::getLoad(MVT::ValueType VT,
+ SDOperand Chain, SDOperand Ptr,
+ const Value *SV, int SVOffset,
+ bool isVolatile, unsigned Alignment) {
+ SDOperand Undef = getNode(ISD::UNDEF, Ptr.getValueType());
+ return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, Chain, Ptr, Undef,
+ SV, SVOffset, VT, isVolatile, Alignment);
+}
+
+SDOperand SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, MVT::ValueType VT,
+ SDOperand Chain, SDOperand Ptr,
+ const Value *SV,
+ int SVOffset, MVT::ValueType EVT,
+ bool isVolatile, unsigned Alignment) {
+ SDOperand Undef = getNode(ISD::UNDEF, Ptr.getValueType());
+ return getLoad(ISD::UNINDEXED, ExtType, VT, Chain, Ptr, Undef,
+ SV, SVOffset, EVT, isVolatile, Alignment);
+}
+
SDOperand
SelectionDAG::getIndexedLoad(SDOperand OrigLoad, SDOperand Base,
SDOperand Offset, ISD::MemIndexedMode AM) {
LoadSDNode *LD = cast<LoadSDNode>(OrigLoad);
assert(LD->getOffset().getOpcode() == ISD::UNDEF &&
"Load is already a indexed load!");
- MVT::ValueType VT = OrigLoad.getValueType();
- SDVTList VTs = getVTList(VT, Base.getValueType(), MVT::Other);
- SDOperand Ops[] = { LD->getChain(), Base, Offset };
- FoldingSetNodeID ID;
- AddNodeIDNode(ID, ISD::LOAD, VTs, Ops, 3);
- ID.AddInteger(AM);
- ID.AddInteger(LD->getExtensionType());
- ID.AddInteger((unsigned int)(LD->getMemoryVT()));
- ID.AddInteger(LD->getAlignment());
- ID.AddInteger(LD->isVolatile());
- void *IP = 0;
- if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
- return SDOperand(E, 0);
- SDNode *N = new LoadSDNode(Ops, VTs, AM,
- LD->getExtensionType(), LD->getMemoryVT(),
- LD->getSrcValue(), LD->getSrcValueOffset(),
- LD->getAlignment(), LD->isVolatile());
- CSEMap.InsertNode(N, IP);
- AllNodes.push_back(N);
- return SDOperand(N, 0);
+ return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(),
+ LD->getChain(), Base, Offset, LD->getSrcValue(),
+ LD->getSrcValueOffset(), LD->getMemoryVT(),
+ LD->isVolatile(), LD->getAlignment());
}
SDOperand SelectionDAG::getStore(SDOperand Chain, SDOperand Val,
RemoveNodeFromCSEMaps(N);
// Now we update the operands.
- N->OperandList[0].Val->removeUser(N);
- Op.Val->addUser(N);
+ N->OperandList[0].Val->removeUser(0, N);
N->OperandList[0] = Op;
+ N->OperandList[0].setUser(N);
+ Op.Val->addUser(0, N);
// If this gets put into a CSE map, add it.
if (InsertPos) CSEMap.InsertNode(N, InsertPos);
// Now we update the operands.
if (N->OperandList[0] != Op1) {
- N->OperandList[0].Val->removeUser(N);
- Op1.Val->addUser(N);
+ N->OperandList[0].Val->removeUser(0, N);
N->OperandList[0] = Op1;
+ N->OperandList[0].setUser(N);
+ Op1.Val->addUser(0, N);
}
if (N->OperandList[1] != Op2) {
- N->OperandList[1].Val->removeUser(N);
- Op2.Val->addUser(N);
+ N->OperandList[1].Val->removeUser(1, N);
N->OperandList[1] = Op2;
+ N->OperandList[1].setUser(N);
+ Op2.Val->addUser(1, N);
}
// If this gets put into a CSE map, add it.
return UpdateNodeOperands(N, Ops, 5);
}
-
SDOperand SelectionDAG::
UpdateNodeOperands(SDOperand InN, SDOperand *Ops, unsigned NumOps) {
SDNode *N = InN.Val;
// Now we update the operands.
for (unsigned i = 0; i != NumOps; ++i) {
if (N->OperandList[i] != Ops[i]) {
- N->OperandList[i].Val->removeUser(N);
- Ops[i].Val->addUser(N);
+ N->OperandList[i].Val->removeUser(i, N);
N->OperandList[i] = Ops[i];
+ N->OperandList[i].setUser(N);
+ Ops[i].Val->addUser(i, N);
}
}
return InN;
}
-
/// MorphNodeTo - This frees the operands of the current node, resets the
/// opcode, types, and operands to the specified value. This should only be
/// used by the SelectionDAG class.
// Clear the operands list, updating used nodes to remove this from their
// use list.
for (op_iterator I = op_begin(), E = op_end(); I != E; ++I)
- I->Val->removeUser(this);
+ I->Val->removeUser(std::distance(op_begin(), I), this);
// If NumOps is larger than the # of operands we currently have, reallocate
// the operand list.
if (NumOps > NumOperands) {
- if (OperandsNeedDelete)
+ if (OperandsNeedDelete) {
delete [] OperandList;
+ }
OperandList = new SDOperand[NumOps];
OperandsNeedDelete = true;
}
for (unsigned i = 0, e = NumOps; i != e; ++i) {
OperandList[i] = Ops[i];
+ OperandList[i].setUser(this);
SDNode *N = OperandList[i].Val;
- N->Uses.push_back(this);
+ N->addUser(i, this);
+ ++N->UsesSize;
}
}
Ops, NumOps).Val;
}
+/// getNodeIfExists - Get the specified node if it's already available, or
+/// else return NULL.
+SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
+ const SDOperand *Ops, unsigned NumOps) {
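+  // Nodes that produce a flag result are never CSE'd, so only flag-free
+  // nodes can be found in (or inserted into) the CSE map.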
+ if (VTList.VTs[VTList.NumVTs-1] != MVT::Flag) {
+ FoldingSetNodeID ID;
+ AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps);
+ void *IP = 0;
+ if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
+ return E;
+ }
+ return NULL;
+}
+
/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
/// This can cause recursive merging of nodes in the DAG.
assert(From->getNumValues() == 1 && FromN.ResNo == 0 &&
"Cannot replace with this method!");
assert(From != To.Val && "Cannot replace uses of with self");
-
+
+ SmallSetVector<SDNode*, 16> Users;
while (!From->use_empty()) {
- // Process users until they are all gone.
- SDNode *U = *From->use_begin();
-
+ SDNode::use_iterator UI = From->use_begin();
+ SDNode *U = UI->getUser();
+
+ // Remember that this node is about to morph.
+ if (Users.count(U))
+ continue;
+ Users.insert(U);
// This node is about to morph, remove its old self from the CSE maps.
RemoveNodeFromCSEMaps(U);
-
- for (SDOperand *I = U->OperandList, *E = U->OperandList+U->NumOperands;
- I != E; ++I)
+ int operandNum = 0;
+ for (SDNode::op_iterator I = U->op_begin(), E = U->op_end();
+ I != E; ++I, ++operandNum)
if (I->Val == From) {
- From->removeUser(U);
+ From->removeUser(operandNum, U);
*I = To;
- To.Val->addUser(U);
- }
+ I->setUser(U);
+ To.Val->addUser(operandNum, U);
+ }
// Now that we have modified U, add it back to the CSE maps. If it already
// exists there, recursively merge the results together.
return ReplaceAllUsesWith(SDOperand(From, 0), SDOperand(To, 0),
UpdateListener);
+ SmallSetVector<SDNode*, 16> Users;
while (!From->use_empty()) {
- // Process users until they are all gone.
- SDNode *U = *From->use_begin();
-
+ SDNode::use_iterator UI = From->use_begin();
+ SDNode *U = UI->getUser();
+
+ // Remember that this node is about to morph.
+ if (Users.count(U))
+ continue;
+ Users.insert(U);
// This node is about to morph, remove its old self from the CSE maps.
RemoveNodeFromCSEMaps(U);
-
- for (SDOperand *I = U->OperandList, *E = U->OperandList+U->NumOperands;
- I != E; ++I)
+ int operandNum = 0;
+ for (SDNode::op_iterator I = U->op_begin(), E = U->op_end();
+ I != E; ++I, ++operandNum)
if (I->Val == From) {
- From->removeUser(U);
+ From->removeUser(operandNum, U);
I->Val = To;
- To->addUser(U);
+ To->addUser(operandNum, U);
}
-
+
// Now that we have modified U, add it back to the CSE maps. If it already
// exists there, recursively merge the results together.
if (SDNode *Existing = AddNonLeafNodeToCSEMaps(U)) {
if (From->getNumValues() == 1) // Handle the simple case efficiently.
return ReplaceAllUsesWith(SDOperand(From, 0), To[0], UpdateListener);
+ SmallSetVector<SDNode*, 16> Users;
while (!From->use_empty()) {
- // Process users until they are all gone.
- SDNode *U = *From->use_begin();
-
+ SDNode::use_iterator UI = From->use_begin();
+ SDNode *U = UI->getUser();
+
+ // Remember that this node is about to morph.
+ if (Users.count(U))
+ continue;
+ Users.insert(U);
// This node is about to morph, remove its old self from the CSE maps.
RemoveNodeFromCSEMaps(U);
-
- for (SDOperand *I = U->OperandList, *E = U->OperandList+U->NumOperands;
- I != E; ++I)
+ int operandNum = 0;
+ for (SDNode::op_iterator I = U->op_begin(), E = U->op_end();
+ I != E; ++I, ++operandNum)
if (I->Val == From) {
const SDOperand &ToOp = To[I->ResNo];
- From->removeUser(U);
+ From->removeUser(operandNum, U);
*I = ToOp;
- ToOp.Val->addUser(U);
+ I->setUser(U);
+ ToOp.Val->addUser(operandNum, U);
}
-
+
// Now that we have modified U, add it back to the CSE maps. If it already
// exists there, recursively merge the results together.
if (SDNode *Existing = AddNonLeafNodeToCSEMaps(U)) {
ChainedSetUpdaterListener(SmallSetVector<SDNode*, 16> &set,
SelectionDAG::DAGUpdateListener *chain)
: Set(set), Chain(chain) {}
-
+
virtual void NodeDeleted(SDNode *N) {
Set.remove(N);
if (Chain) Chain->NodeDeleted(N);
// Get all of the users of From.Val. We want these in a nice,
// deterministically ordered and uniqued set, so we use a SmallSetVector.
- SmallSetVector<SDNode*, 16> Users(From.Val->use_begin(), From.Val->use_end());
+ SmallSetVector<SDNode*, 16> Users;
+ for (SDNode::use_iterator UI = From.Val->use_begin(),
+ E = From.Val->use_end(); UI != E; ++UI) {
+ SDNode *User = UI->getUser();
+ if (!Users.count(User))
+ Users.insert(User);
+ }
// When one of the recursive merges deletes nodes from the graph, we need to
// make sure that UpdateListener is notified *and* that the node is removed
Users.pop_back();
// Scan for an operand that matches From.
- SDOperand *Op = User->OperandList, *E = User->OperandList+User->NumOperands;
+ SDNode::op_iterator Op = User->op_begin(), E = User->op_end();
for (; Op != E; ++Op)
if (*Op == From) break;
// Update all operands that match "From" in case there are multiple uses.
for (; Op != E; ++Op) {
if (*Op == From) {
- From.Val->removeUser(User);
- *Op = To;
- To.Val->addUser(User);
+ From.Val->removeUser(Op-User->op_begin(), User);
+ *Op = To;
+ Op->setUser(User);
+ To.Val->addUser(Op-User->op_begin(), User);
}
}
void RegisterSDNode::ANCHOR() {}
void ExternalSymbolSDNode::ANCHOR() {}
void CondCodeSDNode::ANCHOR() {}
+void ARG_FLAGSSDNode::ANCHOR() {}
void VTSDNode::ANCHOR() {}
void LoadSDNode::ANCHOR() {}
void StoreSDNode::ANCHOR() {}
+void AtomicSDNode::ANCHOR() {}
HandleSDNode::~HandleSDNode() {
SDVTList VTs = { 0, 0 };
SmallPtrSet<SDNode*, 32> UsersHandled;
- for (SDNode::use_iterator UI = Uses.begin(), E = Uses.end(); UI != E; ++UI) {
- SDNode *User = *UI;
- if (User->getNumOperands() == 1 ||
- UsersHandled.insert(User)) // First time we've seen this?
- for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i)
- if (User->getOperand(i) == TheValue) {
- if (NUses == 0)
- return false; // too many uses
- --NUses;
- }
+ // TODO: Only iterate over uses of a given value of the node
+ for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
+ if (*UI == TheValue) {
+ if (NUses == 0)
+ return false;
+ --NUses;
+ }
}
// Found exactly the right number of uses?
SmallPtrSet<SDNode*, 32> UsersHandled;
- for (SDNode::use_iterator UI = Uses.begin(), E = Uses.end(); UI != E; ++UI) {
- SDNode *User = *UI;
+ for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
+ SDNode *User = UI->getUser();
if (User->getNumOperands() == 1 ||
UsersHandled.insert(User)) // First time we've seen this?
for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i)
}
-/// isOnlyUse - Return true if this node is the only use of N.
+/// isOnlyUseOf - Return true if this node is the only use of N.
///
-bool SDNode::isOnlyUse(SDNode *N) const {
+bool SDNode::isOnlyUseOf(SDNode *N) const {
bool Seen = false;
for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
- SDNode *User = *I;
+ SDNode *User = I->getUser();
if (User == this)
Seen = true;
else
/// isOperand - Return true if this node is an operand of N.
///
-bool SDOperand::isOperand(SDNode *N) const {
+bool SDOperandImpl::isOperandOf(SDNode *N) const {
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
if (*this == N->getOperand(i))
return true;
return false;
}
-bool SDNode::isOperand(SDNode *N) const {
+bool SDNode::isOperandOf(SDNode *N) const {
for (unsigned i = 0, e = N->NumOperands; i != e; ++i)
if (this == N->OperandList[i].Val)
return true;
/// side-effecting instructions. In practice, this looks through token
/// factors and non-volatile loads. In order to remain efficient, this only
/// looks a couple of nodes in; it does not do an exhaustive search.
-bool SDOperand::reachesChainWithoutSideEffects(SDOperand Dest,
+bool SDOperandImpl::reachesChainWithoutSideEffects(SDOperandImpl Dest,
unsigned Depth) const {
if (*this == Dest) return true;
}
}
-/// isPredecessor - Return true if this node is a predecessor of N. This node
+/// isPredecessorOf - Return true if this node is a predecessor of N. This node
/// is either an operand of N or it can be reached by recursively traversing
/// up the operands.
/// NOTE: this is an expensive method. Use it carefully.
-bool SDNode::isPredecessor(SDNode *N) const {
+bool SDNode::isPredecessorOf(SDNode *N) const {
SmallPtrSet<SDNode *, 32> Visited;
bool found = false;
findPredecessor(N, this, found, Visited);
return "<<Unknown Target Node>>";
}
+ case ISD::PREFETCH: return "Prefetch";
+ case ISD::MEMBARRIER: return "MemBarrier";
+ case ISD::ATOMIC_LCS: return "AtomicLCS";
+ case ISD::ATOMIC_LAS: return "AtomicLAS";
+ case ISD::ATOMIC_SWAP: return "AtomicSWAP";
case ISD::PCMARKER: return "PCMarker";
case ISD::READCYCLECOUNTER: return "ReadCycleCounter";
case ISD::SRCVALUE: return "SrcValue";
case ISD::STRING: return "String";
case ISD::BasicBlock: return "BasicBlock";
+ case ISD::ARG_FLAGS: return "ArgFlags";
case ISD::VALUETYPE: return "ValueType";
case ISD::Register: return "Register";
}
}
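+
+/// getArgFlagsString - Produce a human-readable rendering of the argument
+/// flags for node dumps, e.g. "< sext inreg >".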
+std::string ISD::ArgFlagsTy::getArgFlagsString() {
+ std::string S = "< ";
+
+ if (isZExt())
+ S += "zext ";
+ if (isSExt())
+ S += "sext ";
+ if (isInReg())
+ S += "inreg ";
+ if (isSRet())
+ S += "sret ";
+ if (isByVal())
+ S += "byval ";
+ if (isNest())
+ S += "nest ";
+ if (getByValAlign())
+ S += "byval-align:" + utostr(getByValAlign()) + " ";
+ if (getOrigAlign())
+ S += "orig-align:" + utostr(getOrigAlign()) + " ";
+ if (getByValSize())
+ S += "byval-size:" + utostr(getByValSize()) + " ";
+ return S + ">";
+}
+
void SDNode::dump() const { dump(0); }
void SDNode::dump(const SelectionDAG *G) const {
cerr << (void*)this << ": ";
cerr << LBB->getName() << " ";
cerr << (const void*)BBDN->getBasicBlock() << ">";
} else if (const RegisterSDNode *R = dyn_cast<RegisterSDNode>(this)) {
- if (G && R->getReg() && MRegisterInfo::isPhysicalRegister(R->getReg())) {
- cerr << " " <<G->getTarget().getRegisterInfo()->getName(R->getReg());
+ if (G && R->getReg() &&
+ TargetRegisterInfo::isPhysicalRegister(R->getReg())) {
+ cerr << " " << G->getTarget().getRegisterInfo()->getName(R->getReg());
} else {
cerr << " #" << R->getReg();
}
cerr << "<" << M->MO.getValue() << ":" << M->MO.getOffset() << ">";
else
cerr << "<null:" << M->MO.getOffset() << ">";
+ } else if (const ARG_FLAGSSDNode *N = dyn_cast<ARG_FLAGSSDNode>(this)) {
+ cerr << N->getArgFlags().getArgFlagsString();
} else if (const VTSDNode *N = dyn_cast<VTSDNode>(this)) {
cerr << ":" << MVT::getValueTypeString(N->getVT());
} else if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(this)) {