X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FTarget%2FX86%2FX86InstrSSE.td;h=3c1db6714621fa785b504562692aadd2c9918e82;hb=dad9c5a14ff29cf4b5f7d7352da8f15337bae51f;hp=b072df0acc47135b01cdeb4332e27ad98a0092a0;hpb=2dadaea5d2613125c0f7de33febdc44d97ae9648;p=oota-llvm.git diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td index b072df0acc4..3c1db671462 100644 --- a/lib/Target/X86/X86InstrSSE.td +++ b/lib/Target/X86/X86InstrSSE.td @@ -29,8 +29,6 @@ def X86ucomi : SDNode<"X86ISD::UCOMI", SDTX86CmpTest, [SDNPOutFlag]>; def X86s2vec : SDNode<"X86ISD::S2VEC", SDTypeProfile<1, 1, []>, []>; -def X86zexts2vec : SDNode<"X86ISD::ZEXT_S2VEC", - SDTypeProfile<1, 1, []>, []>; def X86pextrw : SDNode<"X86ISD::PEXTRW", SDTypeProfile<1, 2, []>, []>; def X86pinsrw : SDNode<"X86ISD::PINSRW", @@ -104,8 +102,8 @@ def MOVLP_shuffle_mask : PatLeaf<(build_vector), [{ return X86::isMOVLPMask(N); }]>; -def MOVS_shuffle_mask : PatLeaf<(build_vector), [{ - return X86::isMOVSMask(N); +def MOVL_shuffle_mask : PatLeaf<(build_vector), [{ + return X86::isMOVLMask(N); }]>; def MOVSHDUP_shuffle_mask : PatLeaf<(build_vector), [{ @@ -528,7 +526,7 @@ def CVTSS2SDrr: I<0x5A, MRMSrcReg, (ops FR64:$dst, FR32:$src), Requires<[HasSSE2]>; def CVTSS2SDrm: I<0x5A, MRMSrcMem, (ops FR64:$dst, f32mem:$src), "cvtss2sd {$src, $dst|$dst, $src}", - [(set FR64:$dst, (fextend (loadf32 addr:$src)))]>, XS, + [(set FR64:$dst, (extload addr:$src, f32))]>, XS, Requires<[HasSSE2]>; // Match intrinsics which expect XMM operand(s). @@ -778,7 +776,7 @@ def MOVUPDmr : PDI<0x11, MRMDestMem, (ops f128mem:$dst, VR128:$src), [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>; let isTwoAddress = 1 in { -let AddedComplexity = 10 in { +let AddedComplexity = 20 in { def MOVLPSrm : PSI<0x12, MRMSrcMem, (ops VR128:$dst, VR128:$src1, f64mem:$src2), "movlps {$src2, $dst|$dst, $src2}", [(set VR128:$dst, @@ -832,7 +830,7 @@ def MOVHPDmr : PDI<0x17, MRMDestMem, (ops f64mem:$dst, VR128:$src), addr:$dst)]>; let isTwoAddress = 1 in { -let AddedComplexity = 10 in { +let AddedComplexity = 20 in { def MOVLHPSrr : PSI<0x16, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2), "movlhps {$src2, $dst|$dst, $src2}", [(set VR128:$dst, @@ -1245,6 +1243,7 @@ def SHUFPDrmi : PDIi8<0xC6, MRMSrcMem, VR128:$src1, (load addr:$src2), SHUFP_shuffle_mask:$src3)))]>; +let AddedComplexity = 10 in { def UNPCKHPSrr : PSI<0x15, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2), "unpckhps {$src2, $dst|$dst, $src2}", @@ -1294,6 +1293,7 @@ def UNPCKLPDrm : PDI<0x14, MRMSrcMem, [(set VR128:$dst, (v2f64 (vector_shuffle VR128:$src1, (load addr:$src2), UNPCKL_shuffle_mask)))]>; +} // AddedComplexity } // Horizontal ops @@ -1988,7 +1988,7 @@ def PUNPCKHDQrm : PDI<0x6A, MRMSrcMem, UNPCKH_shuffle_mask)))]>; def PUNPCKHQDQrr : PDI<0x6D, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2), - "punpckhdq {$src2, $dst|$dst, $src2}", + "punpckhqdq {$src2, $dst|$dst, $src2}", [(set VR128:$dst, (v2i64 (vector_shuffle VR128:$src1, VR128:$src2, UNPCKH_shuffle_mask)))]>; @@ -2192,19 +2192,19 @@ def MOVLSS2PSrr : SSI<0x10, MRMSrcReg, (ops VR128:$dst, VR128:$src1, FR32:$src2) "movss {$src2, $dst|$dst, $src2}", []>; def MOVLSD2PDrr : SDI<0x10, MRMSrcReg, (ops VR128:$dst, VR128:$src1, FR64:$src2), "movsd {$src2, $dst|$dst, $src2}", []>; -def MOVLDI2PDIrr : PDI<0x6E, MRMSrcReg, (ops VR128:$dst, VR128:$src1, R32:$src2), - "movd {$src2, $dst|$dst, $src2}", []>; +let AddedComplexity = 20 in { def MOVLPSrr : SSI<0x10, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2), 
"movss {$src2, $dst|$dst, $src2}", [(set VR128:$dst, (v4f32 (vector_shuffle VR128:$src1, VR128:$src2, - MOVS_shuffle_mask)))]>; + MOVL_shuffle_mask)))]>; def MOVLPDrr : SDI<0x10, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2), "movsd {$src2, $dst|$dst, $src2}", [(set VR128:$dst, (v2f64 (vector_shuffle VR128:$src1, VR128:$src2, - MOVS_shuffle_mask)))]>; + MOVL_shuffle_mask)))]>; +} } // Store / copy lower 64-bits of a XMM register. @@ -2212,30 +2212,41 @@ def MOVLQ128mr : PDI<0xD6, MRMDestMem, (ops i64mem:$dst, VR128:$src), "movq {$src, $dst|$dst, $src}", [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>; -// FIXME: Temporary workaround since 2-wide shuffle is broken. -def MOVLQ128rr : PDI<0xD6, MRMSrcReg, (ops VR128:$dst, VR128:$src), - "movq {$src, $dst|$dst, $src}", - [(set VR128:$dst, (int_x86_sse2_movl_dq VR128:$src))]>; - // Move to lower bits of a VR128 and zeroing upper bits. // Loading from memory automatically zeroing upper bits. +let AddedComplexity = 20 in { def MOVZSS2PSrm : SSI<0x10, MRMSrcMem, (ops VR128:$dst, f32mem:$src), "movss {$src, $dst|$dst, $src}", - [(set VR128:$dst, - (v4f32 (X86zexts2vec (loadf32 addr:$src))))]>; + [(set VR128:$dst, (v4f32 (vector_shuffle immAllZerosV, + (v4f32 (scalar_to_vector (loadf32 addr:$src))), + MOVL_shuffle_mask)))]>; def MOVZSD2PDrm : SDI<0x10, MRMSrcMem, (ops VR128:$dst, f64mem:$src), "movsd {$src, $dst|$dst, $src}", - [(set VR128:$dst, - (v2f64 (X86zexts2vec (loadf64 addr:$src))))]>; + [(set VR128:$dst, (v2f64 (vector_shuffle immAllZerosV, + (v2f64 (scalar_to_vector (loadf64 addr:$src))), + MOVL_shuffle_mask)))]>; +// movd / movq to XMM register zero-extends +def MOVZDI2PDIrr : PDI<0x6E, MRMSrcReg, (ops VR128:$dst, R32:$src), + "movd {$src, $dst|$dst, $src}", + [(set VR128:$dst, (v4i32 (vector_shuffle immAllZerosV, + (v4i32 (scalar_to_vector R32:$src)), + MOVL_shuffle_mask)))]>; def MOVZDI2PDIrm : PDI<0x6E, MRMSrcMem, (ops VR128:$dst, i32mem:$src), "movd {$src, $dst|$dst, $src}", - [(set VR128:$dst, - (v4i32 (X86zexts2vec (loadi32 addr:$src))))]>; -def MOVZQI2PQIrm : PDI<0x7E, MRMSrcMem, (ops VR128:$dst, i64mem:$src), - "movq {$src, $dst|$dst, $src}", - [(set VR128:$dst, - (bc_v2i64 (v2f64 (X86zexts2vec - (loadf64 addr:$src)))))]>; + [(set VR128:$dst, (v4i32 (vector_shuffle immAllZerosV, + (v4i32 (scalar_to_vector (loadi32 addr:$src))), + MOVL_shuffle_mask)))]>; +// Moving from XMM to XMM but still clear upper 64 bits. +def MOVZQI2PQIrr : I<0x7E, MRMSrcReg, (ops VR128:$dst, VR128:$src), + "movq {$src, $dst|$dst, $src}", + [(set VR128:$dst, (int_x86_sse2_movl_dq VR128:$src))]>, + XS, Requires<[HasSSE2]>; +def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (ops VR128:$dst, i64mem:$src), + "movq {$src, $dst|$dst, $src}", + [(set VR128:$dst, (int_x86_sse2_movl_dq + (bc_v4i32 (loadv2i64 addr:$src))))]>, + XS, Requires<[HasSSE2]>; +} //===----------------------------------------------------------------------===// // Non-Instruction Patterns @@ -2270,9 +2281,9 @@ def : Pat<(store (v4i32 VR128:$src), addr:$dst), // Scalar to v8i16 / v16i8. The source may be a R32, but only the lower 8 or // 16-bits matter. 
-def : Pat<(v8i16 (X86s2vec R32:$src)), (MOVDI2PDIrr R32:$src)>, +def : Pat<(v8i16 (X86s2vec R32:$src)), (v8i16 (MOVDI2PDIrr R32:$src))>, Requires<[HasSSE2]>; -def : Pat<(v16i8 (X86s2vec R32:$src)), (MOVDI2PDIrr R32:$src)>, +def : Pat<(v16i8 (X86s2vec R32:$src)), (v16i8 (MOVDI2PDIrr R32:$src))>, Requires<[HasSSE2]>; // bit_convert @@ -2337,23 +2348,31 @@ def : Pat<(v2f64 (bitconvert (v16i8 VR128:$src))), (v2f64 VR128:$src)>, def : Pat<(v2f64 (bitconvert (v4f32 VR128:$src))), (v2f64 VR128:$src)>, Requires<[HasSSE2]>; -// Zeroing a VR128 then do a MOVS* to the lower bits. -def : Pat<(v2f64 (X86zexts2vec FR64:$src)), - (MOVLSD2PDrr (V_SET0_PD), FR64:$src)>, Requires<[HasSSE2]>; -def : Pat<(v4f32 (X86zexts2vec FR32:$src)), - (MOVLSS2PSrr (V_SET0_PS), FR32:$src)>, Requires<[HasSSE2]>; -def : Pat<(v4i32 (X86zexts2vec R32:$src)), - (MOVLDI2PDIrr (V_SET0_PI), R32:$src)>, Requires<[HasSSE2]>; -def : Pat<(v8i16 (X86zexts2vec R16:$src)), - (MOVLDI2PDIrr (V_SET0_PI), (MOVZX32rr16 R16:$src))>, Requires<[HasSSE2]>; -def : Pat<(v16i8 (X86zexts2vec R8:$src)), - (MOVLDI2PDIrr (V_SET0_PI), (MOVZX32rr8 R8:$src))>, Requires<[HasSSE2]>; +// Move scalar to XMM zero-extended +// movd to XMM register zero-extends +let AddedComplexity = 20 in { +def : Pat<(v8i16 (vector_shuffle immAllZerosV, + (v8i16 (X86s2vec R32:$src)), MOVL_shuffle_mask)), + (v8i16 (MOVZDI2PDIrr R32:$src))>, Requires<[HasSSE2]>; +def : Pat<(v16i8 (vector_shuffle immAllZerosV, + (v16i8 (X86s2vec R32:$src)), MOVL_shuffle_mask)), + (v16i8 (MOVZDI2PDIrr R32:$src))>, Requires<[HasSSE2]>; +// Zeroing a VR128 then do a MOVS{S|D} to the lower bits. +def : Pat<(v2f64 (vector_shuffle immAllZerosV, + (v2f64 (scalar_to_vector FR64:$src)), MOVL_shuffle_mask)), + (v2f64 (MOVLSD2PDrr (V_SET0_PD), FR64:$src))>, Requires<[HasSSE2]>; +def : Pat<(v4f32 (vector_shuffle immAllZerosV, + (v4f32 (scalar_to_vector FR32:$src)), MOVL_shuffle_mask)), + (v4f32 (MOVLSS2PSrr (V_SET0_PS), FR32:$src))>, Requires<[HasSSE2]>; +} // Splat v2f64 / v2i64 +let AddedComplexity = 10 in { def : Pat<(vector_shuffle (v2f64 VR128:$src), (undef), SSE_splat_v2_mask:$sm), (v2f64 (UNPCKLPDrr VR128:$src, VR128:$src))>, Requires<[HasSSE2]>; def : Pat<(vector_shuffle (v2i64 VR128:$src), (undef), SSE_splat_v2_mask:$sm), (v2i64 (PUNPCKLQDQrr VR128:$src, VR128:$src))>, Requires<[HasSSE2]>; +} // Splat v4f32 def : Pat<(vector_shuffle (v4f32 VR128:$src), (undef), SSE_splat_mask:$sm), @@ -2382,75 +2401,131 @@ def : Pat<(vector_shuffle (v4i32 VR128:$src1), PSHUFD_binary_shuffle_mask:$sm))>, Requires<[HasSSE2]>; // vector_shuffle v1, , <0, 0, 1, 1, ...> +let AddedComplexity = 10 in { def : Pat<(v4f32 (vector_shuffle VR128:$src, (undef), UNPCKL_v_undef_shuffle_mask)), - (UNPCKLPSrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>; + (v4f32 (UNPCKLPSrr VR128:$src, VR128:$src))>, Requires<[HasSSE2]>; def : Pat<(v16i8 (vector_shuffle VR128:$src, (undef), UNPCKL_v_undef_shuffle_mask)), - (PUNPCKLBWrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>; + (v16i8 (PUNPCKLBWrr VR128:$src, VR128:$src))>, Requires<[HasSSE2]>; def : Pat<(v8i16 (vector_shuffle VR128:$src, (undef), UNPCKL_v_undef_shuffle_mask)), - (PUNPCKLWDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>; + (v8i16 (PUNPCKLWDrr VR128:$src, VR128:$src))>, Requires<[HasSSE2]>; def : Pat<(v4i32 (vector_shuffle VR128:$src, (undef), UNPCKL_v_undef_shuffle_mask)), - (PUNPCKLDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE1]>; + (v4i32 (PUNPCKLDQrr VR128:$src, VR128:$src))>, Requires<[HasSSE1]>; +} +let AddedComplexity = 20 in { // vector_shuffle v1, <1, 1, 3, 3> 
def : Pat<(v4i32 (vector_shuffle VR128:$src, (undef), MOVSHDUP_shuffle_mask)), - (MOVSHDUPrr VR128:$src)>, Requires<[HasSSE3]>; + (v4i32 (MOVSHDUPrr VR128:$src))>, Requires<[HasSSE3]>; def : Pat<(v4i32 (vector_shuffle (bc_v4i32 (loadv2i64 addr:$src)), (undef), MOVSHDUP_shuffle_mask)), - (MOVSHDUPrm addr:$src)>, Requires<[HasSSE3]>; + (v4i32 (MOVSHDUPrm addr:$src))>, Requires<[HasSSE3]>; // vector_shuffle v1, <0, 0, 2, 2> def : Pat<(v4i32 (vector_shuffle VR128:$src, (undef), MOVSLDUP_shuffle_mask)), - (MOVSLDUPrr VR128:$src)>, Requires<[HasSSE3]>; + (v4i32 (MOVSLDUPrr VR128:$src))>, Requires<[HasSSE3]>; def : Pat<(v4i32 (vector_shuffle (bc_v4i32 (loadv2i64 addr:$src)), (undef), MOVSLDUP_shuffle_mask)), - (MOVSLDUPrm addr:$src)>, Requires<[HasSSE3]>; + (v4i32 (MOVSLDUPrm addr:$src))>, Requires<[HasSSE3]>; +} -let AddedComplexity = 10 in { +let AddedComplexity = 20 in { // vector_shuffle v1, v2 <0, 1, 4, 5> using MOVLHPS def : Pat<(v4i32 (vector_shuffle VR128:$src1, VR128:$src2, MOVHP_shuffle_mask)), - (MOVLHPSrr VR128:$src1, VR128:$src2)>; + (v4i32 (MOVLHPSrr VR128:$src1, VR128:$src2))>; // vector_shuffle v1, v2 <6, 7, 2, 3> using MOVHLPS def : Pat<(v4i32 (vector_shuffle VR128:$src1, VR128:$src2, MOVHLPS_shuffle_mask)), - (MOVHLPSrr VR128:$src1, VR128:$src2)>; + (v4i32 (MOVHLPSrr VR128:$src1, VR128:$src2))>; // vector_shuffle v1, (load v2) <4, 5, 2, 3> using MOVLPS // vector_shuffle v1, (load v2) <0, 1, 4, 5> using MOVHPS def : Pat<(v4f32 (vector_shuffle VR128:$src1, (loadv4f32 addr:$src2), MOVLP_shuffle_mask)), - (MOVLPSrm VR128:$src1, addr:$src2)>, Requires<[HasSSE1]>; + (v4f32 (MOVLPSrm VR128:$src1, addr:$src2))>, Requires<[HasSSE1]>; def : Pat<(v2f64 (vector_shuffle VR128:$src1, (loadv2f64 addr:$src2), MOVLP_shuffle_mask)), - (MOVLPDrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>; + (v2f64 (MOVLPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>; def : Pat<(v4f32 (vector_shuffle VR128:$src1, (loadv4f32 addr:$src2), MOVHP_shuffle_mask)), - (MOVHPSrm VR128:$src1, addr:$src2)>, Requires<[HasSSE1]>; + (v4f32 (MOVHPSrm VR128:$src1, addr:$src2))>, Requires<[HasSSE1]>; def : Pat<(v2f64 (vector_shuffle VR128:$src1, (loadv2f64 addr:$src2), MOVHP_shuffle_mask)), - (MOVHPDrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>; + (v2f64 (MOVHPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>; -def : Pat<(v4i32 (vector_shuffle VR128:$src1, VR128:$src2, - MOVS_shuffle_mask)), - (MOVLPSrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>; def : Pat<(v4i32 (vector_shuffle VR128:$src1, (bc_v4i32 (loadv2i64 addr:$src2)), MOVLP_shuffle_mask)), - (MOVLPSrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>; -def : Pat<(v2i64 (vector_shuffle VR128:$src1, VR128:$src2, - MOVS_shuffle_mask)), - (MOVLPDrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>; + (v4i32 (MOVLPSrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>; def : Pat<(v2i64 (vector_shuffle VR128:$src1, (loadv2i64 addr:$src2), + MOVLP_shuffle_mask)), + (v2i64 (MOVLPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>; +def : Pat<(v4i32 (vector_shuffle VR128:$src1, (bc_v4i32 (loadv2i64 addr:$src2)), MOVHP_shuffle_mask)), - (MOVHPDrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>; + (v4i32 (MOVHPSrm VR128:$src1, addr:$src2))>, Requires<[HasSSE1]>; +def : Pat<(v2i64 (vector_shuffle VR128:$src1, (loadv2i64 addr:$src2), + MOVLP_shuffle_mask)), + (v2i64 (MOVLPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>; + +// Setting the lowest element in the vector. 
+def : Pat<(v4i32 (vector_shuffle VR128:$src1, VR128:$src2, + MOVL_shuffle_mask)), + (v4i32 (MOVLPSrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>; +def : Pat<(v2i64 (vector_shuffle VR128:$src1, VR128:$src2, + MOVL_shuffle_mask)), + (v2i64 (MOVLPDrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>; + +// vector_shuffle v1, v2 <4, 5, 2, 3> using MOVLPDrr (movsd) +def : Pat<(v4f32 (vector_shuffle VR128:$src1, VR128:$src2, + MOVLP_shuffle_mask)), + (v4f32 (MOVLPDrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>; +def : Pat<(v4i32 (vector_shuffle VR128:$src1, VR128:$src2, + MOVLP_shuffle_mask)), + (v4i32 (MOVLPDrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>; + +// Set lowest element and zero upper elements. +def : Pat<(bc_v2i64 (vector_shuffle immAllZerosV, + (v2f64 (scalar_to_vector (loadf64 addr:$src))), + MOVL_shuffle_mask)), + (v2i64 (MOVZQI2PQIrm addr:$src))>, Requires<[HasSSE2]>; } +// FIXME: Temporary workaround since 2-wide shuffle is broken. +def : Pat<(int_x86_sse2_movs_d VR128:$src1, VR128:$src2), + (v2f64 (MOVLPDrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>; +def : Pat<(int_x86_sse2_loadh_pd VR128:$src1, addr:$src2), + (v2f64 (MOVHPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>; +def : Pat<(int_x86_sse2_loadl_pd VR128:$src1, addr:$src2), + (v2f64 (MOVLPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>; +def : Pat<(int_x86_sse2_shuf_pd VR128:$src1, VR128:$src2, imm:$src3), + (v2f64 (SHUFPDrri VR128:$src1, VR128:$src2, imm:$src3))>, + Requires<[HasSSE2]>; +def : Pat<(int_x86_sse2_shuf_pd VR128:$src1, (load addr:$src2), imm:$src3), + (v2f64 (SHUFPDrmi VR128:$src1, addr:$src2, imm:$src3))>, + Requires<[HasSSE2]>; +def : Pat<(int_x86_sse2_unpckh_pd VR128:$src1, VR128:$src2), + (v2f64 (UNPCKHPDrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>; +def : Pat<(int_x86_sse2_unpckh_pd VR128:$src1, (load addr:$src2)), + (v2f64 (UNPCKHPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>; +def : Pat<(int_x86_sse2_unpckl_pd VR128:$src1, VR128:$src2), + (v2f64 (UNPCKLPDrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>; +def : Pat<(int_x86_sse2_unpckl_pd VR128:$src1, (load addr:$src2)), + (v2f64 (UNPCKLPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>; +def : Pat<(int_x86_sse2_punpckh_qdq VR128:$src1, VR128:$src2), + (v2i64 (PUNPCKHQDQrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>; +def : Pat<(int_x86_sse2_punpckh_qdq VR128:$src1, (load addr:$src2)), + (v2i64 (PUNPCKHQDQrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>; +def : Pat<(int_x86_sse2_punpckl_qdq VR128:$src1, VR128:$src2), + (v2i64 (PUNPCKLQDQrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>; +def : Pat<(int_x86_sse2_punpckl_qdq VR128:$src1, (load addr:$src2)), + (PUNPCKLQDQrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>; + // 128-bit logical shifts def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2), (v2i64 (PSLLDQri VR128:$src1, (PSxLDQ_imm imm:$src2)))>, @@ -2462,20 +2537,20 @@ def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2), // Some special case pandn patterns. 
def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v4i32 immAllOnesV))), VR128:$src2)), - (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>; + (v2i64 (PANDNrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>; def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v8i16 immAllOnesV))), VR128:$src2)), - (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>; + (v2i64 (PANDNrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>; def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v16i8 immAllOnesV))), VR128:$src2)), - (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>; + (v2i64 (PANDNrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>; def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v4i32 immAllOnesV))), (load addr:$src2))), - (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>; + (v2i64 (PANDNrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>; def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v8i16 immAllOnesV))), (load addr:$src2))), - (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>; + (v2i64 (PANDNrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>; def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v16i8 immAllOnesV))), (load addr:$src2))), - (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>; + (v2i64 (PANDNrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
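
For reference, outside the .td syntax: MOVL_shuffle_mask matches a shuffle that takes element 0 from its
second operand and the remaining elements from its first, and when the first shuffle operand is the
all-zeros vector the result is the zero-extending behavior of movss/movsd loads and of movd/movq moves
into an XMM register. A minimal C sketch of that behavior, using the standard Intel SSE/SSE2 intrinsics
(this demo is illustrative only and is not part of the patch; it assumes an SSE2-capable compiler):

#include <stdio.h>
#include <xmmintrin.h>   /* SSE:  movss forms */
#include <emmintrin.h>   /* SSE2: movd / movq / movsd forms */

int main(void) {
  /* movd r32 -> xmm (cf. MOVZDI2PDIrr): upper 96 bits are zeroed. */
  __m128i di = _mm_cvtsi32_si128(0x12345678);

  /* movq m64 -> xmm (cf. MOVZQI2PQIrm): upper 64 bits are zeroed. */
  long long q = 0x1122334455667788LL;
  __m128i qi = _mm_loadl_epi64((const __m128i *)&q);

  /* movss xmm, xmm (cf. MOVLPSrr / MOVL_shuffle_mask): element 0 comes from
     the second operand, elements 1..3 are kept from the first operand. */
  __m128 a = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);   /* a = <1, 2, 3, 4> */
  __m128 b = _mm_set_ps(8.0f, 7.0f, 6.0f, 5.0f);   /* b = <5, 6, 7, 8> */
  __m128 merged = _mm_move_ss(a, b);               /* <5, 2, 3, 4> */

  /* With a zeroed first operand this becomes the zero-extending form that the
     vector_shuffle-of-immAllZerosV patterns describe (cf. the V_SET0_PS +
     MOVLSS2PSrr Pat above). */
  __m128 zext = _mm_move_ss(_mm_setzero_ps(), b);  /* <5, 0, 0, 0> */

  float out[4];
  _mm_storeu_ps(out, merged);
  printf("movss merge: %g %g %g %g\n", out[0], out[1], out[2], out[3]);
  _mm_storeu_ps(out, zext);
  printf("movss zext:  %g %g %g %g\n", out[0], out[1], out[2], out[3]);
  (void)di; (void)qi;
  return 0;
}

Built with something like "cc -msse2", this should print "movss merge: 5 2 3 4" and
"movss zext: 5 0 0 0", which is the element selection MOVL_shuffle_mask describes.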