  case ISD::BUILD_VECTOR:
    // If this is a case we can't handle, return null and let the default
    // expansion code take care of it. If we CAN select this case, return Op.
-
-    // See if this is all zeros.
+
    // FIXME: We should handle splat(-0.0), and other cases here.
-    if (ISD::isBuildVectorAllZeros(Op.Val))
+
+    // See if this is all zeros.
+    if (ISD::isBuildVectorAllZeros(Op.Val)) {
+      // Canonicalize all zero vectors to be v4i32.
+      if (Op.getValueType() != MVT::v4i32) {
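+        // Build the zero in its canonical v4i32 form, then bit_convert the
+        // result back to the original vector type so users of this node still
+        // see the type they expect.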
+        SDOperand Z = DAG.getConstant(0, MVT::i32);
+        Z = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Z, Z, Z, Z);
+        Op = DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Z);
+      }
      return Op;
+    }
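+    // Otherwise, see if this is a splat of a constant small enough to use one
+    // of the vspltis* (vector splat immediate) instructions.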
    if (PPC::get_VSPLI_elt(Op.Val, 1, DAG).Val ||   // vspltisb
        PPC::get_VSPLI_elt(Op.Val, 2, DAG).Val ||   // vspltish
def V_SET0 : VXForm_setzero<1220, (ops VRRC:$vD),
"vxor $vD, $vD, $vD", VecFP,
- [(set VRRC:$vD, (v4f32 immAllZerosV))]>;
+ [(set VRRC:$vD, (v4i32 immAllZerosV))]>;
}
//===----------------------------------------------------------------------===//
def : Pat<(v16i8 (undef)), (v16i8 (IMPLICIT_DEF_VRRC))>;
def : Pat<(v8i16 (undef)), (v8i16 (IMPLICIT_DEF_VRRC))>;
def : Pat<(v4i32 (undef)), (v4i32 (IMPLICIT_DEF_VRRC))>;
-def : Pat<(v16i8 immAllZerosV), (v16i8 (V_SET0))>;
-def : Pat<(v8i16 immAllZerosV), (v8i16 (V_SET0))>;
-def : Pat<(v4i32 immAllZerosV), (v4i32 (V_SET0))>;
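+// All-zeros vectors are canonicalized to v4i32 during lowering, so the single
+// v4i32 immAllZerosV pattern on V_SET0 above now covers every vector type.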
// Loads.
def : Pat<(v16i8 (load xoaddr:$src)), (v16i8 (LVX xoaddr:$src))>;
          (v8i16 (VANDC VRRC:$A, VRRC:$B))>;
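+// AltiVec has no plain vector fp multiply, so fmul is matched as vmaddfp with
+// the zero vector as the addend; the explicit v4i32 cast fixes the result type
+// of V_SET0, which is now defined over v4i32.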
def : Pat<(fmul VRRC:$vA, VRRC:$vB),
-          (VMADDFP VRRC:$vA, VRRC:$vB, (V_SET0))>;
+          (VMADDFP VRRC:$vA, VRRC:$vB, (v4i32 (V_SET0)))>;
// Fused multiply add and multiply sub for packed float. These are represented
// separately from the real instructions above, for operations that must have