memset(OpActions, 0, sizeof(OpActions));
memset(LoadXActions, 0, sizeof(LoadXActions));
memset(&StoreXActions, 0, sizeof(StoreXActions));
+ // Initialize all indexed load / store operations to Expand.
+ for (unsigned VT = 0; VT != (unsigned)MVT::LAST_VALUETYPE; ++VT) {
+ for (unsigned IM = (unsigned)ISD::PRE_INC;
+ IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
+ setIndexedLoadAction(IM, (MVT::ValueType)VT, Expand);
+ setIndexedStoreAction(IM, (MVT::ValueType)VT, Expand);
+ }
+ }
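+ // Note: targets that actually support indexed addressing are expected to
+ // override these defaults in their own constructors, for example with a
+ // call such as setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal).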
IsLittleEndian = TD->isLittleEndian();
UsesGlobalOffsetTable = false;
sizeof(TargetDAGCombineArray)/sizeof(TargetDAGCombineArray[0]));
maxStoresPerMemset = maxStoresPerMemcpy = maxStoresPerMemmove = 8;
allowUnalignedMemoryAccesses = false;
- UseUnderscoreSetJmpLongJmp = false;
+ UseUnderscoreSetJmp = false;
+ UseUnderscoreLongJmp = false;
IntDivIsCheap = false;
Pow2DivIsCheap = false;
StackPointerRegisterToSaveRestore = 0;
assert(VT < PromoteTo && "Must promote to a larger type!");
TransformToType[VT] = PromoteTo;
} else if (Action == TargetLowering::Expand) {
- assert((VT == MVT::Vector || MVT::isInteger(VT)) && VT > MVT::i8 &&
- "Cannot expand this type: target must support SOME integer reg!");
- // Expand to the next smaller integer type!
- TransformToType[VT] = (MVT::ValueType)(VT-1);
+ // f32 and f64 are each expanded to the integer type of the same size.
+ if (VT == MVT::f32)
+ TransformToType[VT] = MVT::i32;
+ else if (VT == MVT::f64)
+ TransformToType[VT] = MVT::i64;
+ else {
+ assert((VT == MVT::Vector || MVT::isInteger(VT)) && VT > MVT::i8 &&
+ "Cannot expand this type: target must support SOME integer reg!");
+ // Expand to the next smaller integer type!
+ TransformToType[VT] = (MVT::ValueType)(VT-1);
+ }
}
}
else
TransformToType[(MVT::ValueType)IntReg] = (MVT::ValueType)IntReg;
- // If the target does not have native support for F32, promote it to F64.
- if (!isTypeLegal(MVT::f32))
- SetValueTypeAction(MVT::f32, Promote, *this,
- TransformToType, ValueTypeActions);
- else
+ // If the target does not have native F64 support, expand it to I64; we will
+ // then be generating soft-float library calls. If the target does not have
+ // native F32 support, promote it to F64 when F64 is legal; otherwise, expand
+ // it to I32.
+ if (isTypeLegal(MVT::f64))
+ TransformToType[MVT::f64] = MVT::f64;
+ else {
+ NumElementsForVT[MVT::f64] = NumElementsForVT[MVT::i64];
+ SetValueTypeAction(MVT::f64, Expand, *this, TransformToType,
+ ValueTypeActions);
+ }
+ if (isTypeLegal(MVT::f32))
TransformToType[MVT::f32] = MVT::f32;
+ else if (isTypeLegal(MVT::f64))
+ SetValueTypeAction(MVT::f32, Promote, *this, TransformToType,
+ ValueTypeActions);
+ else {
+ NumElementsForVT[MVT::f32] = NumElementsForVT[MVT::i32];
+ SetValueTypeAction(MVT::f32, Expand, *this, TransformToType,
+ ValueTypeActions);
+ }
// Set MVT::Vector to always be Expanded
SetValueTypeAction(MVT::Vector, Expand, *this, TransformToType,
if (isTypeLegal((MVT::ValueType)i))
TransformToType[i] = (MVT::ValueType)i;
}
-
- assert(isTypeLegal(MVT::f64) && "Target does not support FP?");
- TransformToType[MVT::f64] = MVT::f64;
}
const char *TargetLowering::getTargetNodeName(unsigned Opcode) const {
return TLO.CombineTo(Op, Op.getOperand(0));
if ((DemandedMask & KnownZero2) == DemandedMask)
return TLO.CombineTo(Op, Op.getOperand(1));
+
+ // If every demanded bit is known to be zero on at least one side, the two
+ // operands have no demanded one-bits in common, so this xor can be turned
+ // into an *inclusive* or.
+ // e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
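+ // For instance, with DemandedMask = 0xFF, known-zero bits of 0xF0 on one
+ // operand and 0x0F on the other satisfy the check below: every demanded bit
+ // is known zero on at least one side, so the xor computes the same value as
+ // an inclusive or.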
+ if ((DemandedMask & ~KnownZero & ~KnownZero2) == 0)
+ return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::OR, Op.getValueType(),
+ Op.getOperand(0),
+ Op.getOperand(1)));
// Output known-0 bits are known if clear or set in both the LHS & RHS.
KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
// Output known-1 are known to be set if set in only one of the LHS, RHS.
KnownOneOut = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
- // If all of the unknown bits are known to be zero on one side or the other
- // (but not both) turn this into an *inclusive* or.
- // e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
- if (uint64_t UnknownBits = DemandedMask & ~(KnownZeroOut|KnownOneOut))
- if ((UnknownBits & (KnownZero|KnownZero2)) == UnknownBits)
- return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::OR, Op.getValueType(),
- Op.getOperand(0),
- Op.getOperand(1)));
// If all of the demanded bits on one side are known, and all of the set
// bits on that side are also known to be set on the other side, turn this
// into an AND, as we know the bits will be cleared.