"Fixed size array in TargetLowering is not large enough!");
// All operations default to being supported.
memset(OpActions, 0, sizeof(OpActions));
+ memset(LoadXActions, 0, sizeof(LoadXActions));
+ memset(&StoreXActions, 0, sizeof(StoreXActions));
+ // Initialize all indexed load / store to expand.
+ for (unsigned VT = 0; VT != (unsigned)MVT::LAST_VALUETYPE; ++VT) {
+ for (unsigned IM = (unsigned)ISD::PRE_INC;
+ IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
+ setIndexedLoadAction(IM, (MVT::ValueType)VT, Expand);
+ setIndexedStoreAction(IM, (MVT::ValueType)VT, Expand);
+ }
+ }
IsLittleEndian = TD->isLittleEndian();
+ UsesGlobalOffsetTable = false;
ShiftAmountTy = SetCCResultTy = PointerTy = getValueType(TD->getIntPtrType());
ShiftAmtHandling = Undefined;
memset(RegClassForVT, 0,MVT::LAST_VALUETYPE*sizeof(TargetRegisterClass*));
sizeof(TargetDAGCombineArray)/sizeof(TargetDAGCombineArray[0]));
maxStoresPerMemset = maxStoresPerMemcpy = maxStoresPerMemmove = 8;
allowUnalignedMemoryAccesses = false;
- UseUnderscoreSetJmpLongJmp = false;
+ UseUnderscoreSetJmp = false;
+ UseUnderscoreLongJmp = false;
IntDivIsCheap = false;
Pow2DivIsCheap = false;
StackPointerRegisterToSaveRestore = 0;
SchedPreferenceInfo = SchedulingForLatency;
+ JumpBufSize = 0;
+ JumpBufAlignment = 0;
}
// Destructor: intentionally empty — no explicit cleanup is performed here.
TargetLowering::~TargetLowering() {}
assert(VT < PromoteTo && "Must promote to a larger type!");
TransformToType[VT] = PromoteTo;
} else if (Action == TargetLowering::Expand) {
- assert((VT == MVT::Vector || MVT::isInteger(VT)) && VT > MVT::i8 &&
- "Cannot expand this type: target must support SOME integer reg!");
- // Expand to the next smaller integer type!
- TransformToType[VT] = (MVT::ValueType)(VT-1);
+ // f32 and f64 is each expanded to corresponding integer type of same size.
+ if (VT == MVT::f32)
+ TransformToType[VT] = MVT::i32;
+ else if (VT == MVT::f64)
+ TransformToType[VT] = MVT::i64;
+ else {
+ assert((VT == MVT::Vector || MVT::isInteger(VT)) && VT > MVT::i8 &&
+ "Cannot expand this type: target must support SOME integer reg!");
+ // Expand to the next smaller integer type!
+ TransformToType[VT] = (MVT::ValueType)(VT-1);
+ }
}
}
else
TransformToType[(MVT::ValueType)IntReg] = (MVT::ValueType)IntReg;
- // If the target does not have native support for F32, promote it to F64.
- if (!isTypeLegal(MVT::f32))
- SetValueTypeAction(MVT::f32, Promote, *this,
- TransformToType, ValueTypeActions);
- else
+ // If the target does not have native F64 support, expand it to I64. We will
+ // be generating soft float library calls. If the target does not have native
+ // support for F32, promote it to F64 if it is legal. Otherwise, expand it to
+ // I32.
+ if (isTypeLegal(MVT::f64))
+ TransformToType[MVT::f64] = MVT::f64;
+ else {
+ NumElementsForVT[MVT::f64] = NumElementsForVT[MVT::i64];
+ SetValueTypeAction(MVT::f64, Expand, *this, TransformToType,
+ ValueTypeActions);
+ }
+ if (isTypeLegal(MVT::f32))
TransformToType[MVT::f32] = MVT::f32;
+ else if (isTypeLegal(MVT::f64))
+ SetValueTypeAction(MVT::f32, Promote, *this, TransformToType,
+ ValueTypeActions);
+ else {
+ NumElementsForVT[MVT::f32] = NumElementsForVT[MVT::i32];
+ SetValueTypeAction(MVT::f32, Expand, *this, TransformToType,
+ ValueTypeActions);
+ }
// Set MVT::Vector to always be Expanded
SetValueTypeAction(MVT::Vector, Expand, *this, TransformToType,
if (isTypeLegal((MVT::ValueType)i))
TransformToType[i] = (MVT::ValueType)i;
}
-
- assert(isTypeLegal(MVT::f64) && "Target does not support FP?");
- TransformToType[MVT::f64] = MVT::f64;
}
const char *TargetLowering::getTargetNodeName(unsigned Opcode) const {
return TLO.CombineTo(Op, Op.getOperand(0));
if ((DemandedMask & KnownZero2) == DemandedMask)
return TLO.CombineTo(Op, Op.getOperand(1));
+
+ // If all of the unknown bits are known to be zero on one side or the other
+ // (but not both) turn this into an *inclusive* or.
+ // e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
+ if ((DemandedMask & ~KnownZero & ~KnownZero2) == 0)
+ return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::OR, Op.getValueType(),
+ Op.getOperand(0),
+ Op.getOperand(1)));
// Output known-0 bits are known if clear or set in both the LHS & RHS.
KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
// Output known-1 are known to be set if set in only one of the LHS, RHS.
KnownOneOut = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
- // If all of the unknown bits are known to be zero on one side or the other
- // (but not both) turn this into an *inclusive* or.
- // e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
- if (uint64_t UnknownBits = DemandedMask & ~(KnownZeroOut|KnownOneOut))
- if ((UnknownBits & (KnownZero|KnownZero2)) == UnknownBits)
- return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::OR, Op.getValueType(),
- Op.getOperand(0),
- Op.getOperand(1)));
// If all of the demanded bits on one side are known, and all of the set
// bits on that side are also known to be set on the other side, turn this
// into an AND, as we know the bits will be cleared.
}
break;
case ISD::SIGN_EXTEND_INREG: {
- MVT::ValueType VT = Op.getValueType();
MVT::ValueType EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
// Sign extension. Compute the demanded bits in the result that are not
KnownOne = 0;
break;
}
- case ISD::ZEXTLOAD: {
- MVT::ValueType VT = cast<VTSDNode>(Op.getOperand(3))->getVT();
- KnownZero |= ~MVT::getIntVTBitMask(VT) & DemandedMask;
+ case ISD::LOAD: {
+ if (ISD::isZEXTLoad(Op.Val)) {
+ LoadSDNode *LD = cast<LoadSDNode>(Op);
+ MVT::ValueType VT = LD->getLoadedVT();
+ KnownZero |= ~MVT::getIntVTBitMask(VT) & DemandedMask;
+ }
break;
}
case ISD::ZERO_EXTEND: {
}
return;
case ISD::SIGN_EXTEND_INREG: {
- MVT::ValueType VT = Op.getValueType();
MVT::ValueType EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
// Sign extension. Compute the demanded bits in the result that are not
KnownOne = 0;
return;
}
- case ISD::ZEXTLOAD: {
- MVT::ValueType VT = cast<VTSDNode>(Op.getOperand(3))->getVT();
- KnownZero |= ~MVT::getIntVTBitMask(VT) & Mask;
+ case ISD::LOAD: {
+ if (ISD::isZEXTLoad(Op.Val)) {
+ LoadSDNode *LD = cast<LoadSDNode>(Op);
+ MVT::ValueType VT = LD->getLoadedVT();
+ KnownZero |= ~MVT::getIntVTBitMask(VT) & Mask;
+ }
return;
}
case ISD::ZERO_EXTEND: {
case ISD::AssertZext:
Tmp = MVT::getSizeInBits(cast<VTSDNode>(Op.getOperand(1))->getVT());
return VTBits-Tmp;
-
- case ISD::SEXTLOAD: // '17' bits known
- Tmp = MVT::getSizeInBits(cast<VTSDNode>(Op.getOperand(3))->getVT());
- return VTBits-Tmp+1;
- case ISD::ZEXTLOAD: // '16' bits known
- Tmp = MVT::getSizeInBits(cast<VTSDNode>(Op.getOperand(3))->getVT());
- return VTBits-Tmp;
case ISD::Constant: {
uint64_t Val = cast<ConstantSDNode>(Op)->getValue();
break;
}
+ // Handle LOADX separately here. EXTLOAD case will fallthrough.
+ if (Op.getOpcode() == ISD::LOAD) {
+ LoadSDNode *LD = cast<LoadSDNode>(Op);
+ unsigned ExtType = LD->getExtensionType();
+ switch (ExtType) {
+ default: break;
+ case ISD::SEXTLOAD: // '17' bits known
+ Tmp = MVT::getSizeInBits(LD->getLoadedVT());
+ return VTBits-Tmp+1;
+ case ISD::ZEXTLOAD: // '16' bits known
+ Tmp = MVT::getSizeInBits(LD->getLoadedVT());
+ return VTBits-Tmp;
+ }
+ }
+
// Allow the target to implement this method for its nodes.
if (Op.getOpcode() >= ISD::BUILTIN_OP_END ||
Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
}
}
-bool TargetLowering::isOperandValidForConstraint(SDOperand Op,
- char ConstraintLetter) {
+/// isOperandValidForConstraint - Return the specified operand (possibly
+/// modified) if the specified SDOperand is valid for the specified target
+/// constraint letter, otherwise return null.
+SDOperand TargetLowering::isOperandValidForConstraint(SDOperand Op,
+ char ConstraintLetter,
+ SelectionDAG &DAG) {
switch (ConstraintLetter) {
- default: return false;
+ default: return SDOperand(0,0);
case 'i': // Simple Integer or Relocatable Constant
case 'n': // Simple Integer
case 's': // Relocatable Constant
- return true; // FIXME: not right.
+ return Op; // FIXME: not right.
}
}
-
std::vector<unsigned> TargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
MVT::ValueType VT) const {