/// \brief Try to get a scalar value for a specific element of a vector.
///
-/// Looks through BUILD_VECTOR and SCALAR_TO_VECTOR nodes to find a scalar.
+/// Looks through bitcasts, BUILD_VECTOR, and SCALAR_TO_VECTOR nodes to find
+/// a scalar.
-static SDValue getScalarValueForVectorElement(SDValue V, int Idx) {
+static SDValue getScalarValueForVectorElement(SDValue V, int Idx,
+ SelectionDAG &DAG) {
+ MVT VT = V.getSimpleValueType();
+ MVT EltVT = VT.getVectorElementType();
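+ // Peel off any bitcasts so we look at the node that actually defines the
+ // vector's elements.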
+ while (V.getOpcode() == ISD::BITCAST)
+ V = V.getOperand(0);
+ // If the bitcasts change the element size, we can't extract an equivalent
+ // element from the result.
+ MVT NewVT = V.getSimpleValueType();
+ if (!NewVT.isVector() ||
+ NewVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
+ return SDValue();
+
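+ // BUILD_VECTOR provides a scalar operand for every element, while
+ // SCALAR_TO_VECTOR only defines element zero. Bitcast the scalar back to
+ // the original element type in case we looked through vector bitcasts.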
if (V.getOpcode() == ISD::BUILD_VECTOR ||
(Idx == 0 && V.getOpcode() == ISD::SCALAR_TO_VECTOR))
- return V.getOperand(Idx);
+ return DAG.getNode(ISD::BITCAST, SDLoc(V), EltVT, V.getOperand(Idx));
return SDValue();
}
+
+/// \brief Helper to test for a load that can be folded with x86 shuffles.
+///
+/// This is particularly important because the set of instructions varies
+/// significantly based on whether the operand is a load or not.
+static bool isShuffleFoldableLoad(SDValue V) {
+ while (V.getOpcode() == ISD::BITCAST)
+ V = V.getOperand(0);
+
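+ // Only plain, non-extending loads can be folded into a shuffle's memory
+ // operand; extending loads change the loaded value's type.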
+ return ISD::isNON_EXTLoad(V.getNode());
+}
+
/// \brief Try to lower insertion of a single element into a zero vector.
///
/// This is a common pattern that we have especially efficient patterns to lower
// all the smarts here sunk into that routine. However, the current
// lowering of BUILD_VECTOR makes that nearly impossible until the old
// vector shuffle lowering is dead.
- SDValue V2S = getScalarValueForVectorElement(V2, Mask[V2Index] - Mask.size());
+ SDValue V2S =
+ getScalarValueForVectorElement(V2, Mask[V2Index] - Mask.size(), DAG);
if (!V2S)
return SDValue();
// If the scalar isn't a load, we can't broadcast from it in AVX1, only with
// AVX2.
- if (!Subtarget->hasAVX2() && !ISD::isNON_EXTLoad(V.getNode()))
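+ // A foldable load lets us use the memory form of VBROADCAST, which is the
+ // only form AVX1 provides.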
+ if (!Subtarget->hasAVX2() && !isShuffleFoldableLoad(V))
return SDValue();
} else if (BroadcastIdx != 0 || !Subtarget->hasAVX2()) {
// We can't broadcast from a vector register w/o AVX2, and we can only
// Try to use one of the special instruction patterns to handle two common
// blend patterns if a zero-blend above didn't work.
if (isShuffleEquivalent(Mask, 0, 3) || isShuffleEquivalent(Mask, 1, 3))
- if (SDValue V1S = getScalarValueForVectorElement(V1, Mask[0]))
+ if (SDValue V1S = getScalarValueForVectorElement(V1, Mask[0], DAG))
// We can either use a special instruction to load over the low double or
// to move just the low double.
- return DAG.getNode(ISD::isNON_EXTLoad(V1S.getNode()) ? X86ISD::MOVLPD
- : X86ISD::MOVSD,
- DL, MVT::v2f64, V2, V1S);
+ return DAG.getNode(
+ isShuffleFoldableLoad(V1S) ? X86ISD::MOVLPD : X86ISD::MOVSD,
+ DL, MVT::v2f64, V2,
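+ // Wrap the scalar back into a v2f64: both MOVLPD and MOVSD take vector
+ // operands rather than a bare f64.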
+ DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64, V1S));
if (Subtarget->hasSSE41())
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v2f64, V1, V2, Mask,
; SSE2-LABEL: insert_reg_lo_v2i64:
; SSE2: # BB#0:
; SSE2-NEXT: movd %rdi, %xmm1
-; SSE2-NEXT: shufpd {{.*#+}} xmm1 = xmm1[0],xmm0[1]
-; SSE2-NEXT: movapd %xmm1, %xmm0
+; SSE2-NEXT: movsd %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE3-LABEL: insert_reg_lo_v2i64:
; SSE3: # BB#0:
; SSE3-NEXT: movd %rdi, %xmm1
-; SSE3-NEXT: shufpd {{.*#+}} xmm1 = xmm1[0],xmm0[1]
-; SSE3-NEXT: movapd %xmm1, %xmm0
+; SSE3-NEXT: movsd %xmm1, %xmm0
; SSE3-NEXT: retq
;
; SSSE3-LABEL: insert_reg_lo_v2i64:
; SSSE3: # BB#0:
; SSSE3-NEXT: movd %rdi, %xmm1
-; SSSE3-NEXT: shufpd {{.*#+}} xmm1 = xmm1[0],xmm0[1]
-; SSSE3-NEXT: movapd %xmm1, %xmm0
+; SSSE3-NEXT: movsd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: insert_reg_lo_v2i64:
define <2 x i64> @insert_mem_lo_v2i64(i64* %ptr, <2 x i64> %b) {
; SSE2-LABEL: insert_mem_lo_v2i64:
; SSE2: # BB#0:
-; SSE2-NEXT: movq (%rdi), %xmm1
-; SSE2-NEXT: shufpd {{.*#+}} xmm1 = xmm1[0],xmm0[1]
-; SSE2-NEXT: movapd %xmm1, %xmm0
+; SSE2-NEXT: movlpd (%rdi), %xmm0
; SSE2-NEXT: retq
;
; SSE3-LABEL: insert_mem_lo_v2i64:
; SSE3: # BB#0:
-; SSE3-NEXT: movq (%rdi), %xmm1
-; SSE3-NEXT: shufpd {{.*#+}} xmm1 = xmm1[0],xmm0[1]
-; SSE3-NEXT: movapd %xmm1, %xmm0
+; SSE3-NEXT: movlpd (%rdi), %xmm0
; SSE3-NEXT: retq
;
; SSSE3-LABEL: insert_mem_lo_v2i64:
; SSSE3: # BB#0:
-; SSSE3-NEXT: movq (%rdi), %xmm1
-; SSSE3-NEXT: shufpd {{.*#+}} xmm1 = xmm1[0],xmm0[1]
-; SSSE3-NEXT: movapd %xmm1, %xmm0
+; SSSE3-NEXT: movlpd (%rdi), %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: insert_mem_lo_v2i64: