X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FTarget%2FX86%2FX86InstrSSE.td;h=3c1db6714621fa785b504562692aadd2c9918e82;hb=dad9c5a14ff29cf4b5f7d7352da8f15337bae51f;hp=a1b36747cc5bc55180c6b31a3e18dd67bfde4bac;hpb=1af18985b8662c3b76b7ece2862d07c794708e41;p=oota-llvm.git

diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td
index a1b36747cc5..3c1db671462 100644
--- a/lib/Target/X86/X86InstrSSE.td
+++ b/lib/Target/X86/X86InstrSSE.td
@@ -29,8 +29,6 @@ def X86ucomi : SDNode<"X86ISD::UCOMI", SDTX86CmpTest, [SDNPOutFlag]>;
 def X86s2vec : SDNode<"X86ISD::S2VEC",
                       SDTypeProfile<1, 1, []>, []>;
-def X86zexts2vec : SDNode<"X86ISD::ZEXT_S2VEC",
-                          SDTypeProfile<1, 1, []>, []>;
 def X86pextrw : SDNode<"X86ISD::PEXTRW",
                        SDTypeProfile<1, 2, []>, []>;
 def X86pinsrw : SDNode<"X86ISD::PINSRW",
                        SDTypeProfile<1, 3, []>, []>;
@@ -92,10 +90,6 @@ def SSE_splat_v2_mask : PatLeaf<(build_vector), [{
   return X86::isSplatMask(N);
 }]>;
 
-def MOVLHPS_shuffle_mask : PatLeaf<(build_vector), [{
-  return X86::isMOVLHPSMask(N);
-}]>;
-
 def MOVHLPS_shuffle_mask : PatLeaf<(build_vector), [{
   return X86::isMOVHLPSMask(N);
 }]>;
@@ -108,8 +102,8 @@ def MOVLP_shuffle_mask : PatLeaf<(build_vector), [{
   return X86::isMOVLPMask(N);
 }]>;
 
-def MOVS_shuffle_mask : PatLeaf<(build_vector), [{
-  return X86::isMOVSMask(N);
+def MOVL_shuffle_mask : PatLeaf<(build_vector), [{
+  return X86::isMOVLMask(N);
 }]>;
 
 def MOVSHDUP_shuffle_mask : PatLeaf<(build_vector), [{
@@ -532,7 +526,7 @@ def CVTSS2SDrr: I<0x5A, MRMSrcReg, (ops FR64:$dst, FR32:$src),
                 Requires<[HasSSE2]>;
 def CVTSS2SDrm: I<0x5A, MRMSrcMem, (ops FR64:$dst, f32mem:$src),
                 "cvtss2sd {$src, $dst|$dst, $src}",
-                [(set FR64:$dst, (fextend (loadf32 addr:$src)))]>, XS,
+                [(set FR64:$dst, (extload addr:$src, f32))]>, XS,
                 Requires<[HasSSE2]>;
 
 // Match intrinsics which expect XMM operand(s).
@@ -766,10 +760,10 @@ def MOVAPDmr : PDI<0x29, MRMDestMem, (ops f128mem:$dst, VR128:$src),
 
 def MOVUPSrr : PSI<0x10, MRMSrcReg, (ops VR128:$dst, VR128:$src),
                    "movups {$src, $dst|$dst, $src}", []>;
-def MOVUPSrm : PDI<0x10, MRMSrcMem, (ops VR128:$dst, f128mem:$src),
+def MOVUPSrm : PSI<0x10, MRMSrcMem, (ops VR128:$dst, f128mem:$src),
                    "movups {$src, $dst|$dst, $src}",
                    [(set VR128:$dst, (int_x86_sse_loadu_ps addr:$src))]>;
-def MOVUPSmr : PDI<0x11, MRMDestMem, (ops f128mem:$dst, VR128:$src),
+def MOVUPSmr : PSI<0x11, MRMDestMem, (ops f128mem:$dst, VR128:$src),
                    "movups {$src, $dst|$dst, $src}",
                    [(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>;
 def MOVUPDrr : PDI<0x10, MRMSrcReg, (ops VR128:$dst, VR128:$src),
@@ -782,6 +776,7 @@ def MOVUPDmr : PDI<0x11, MRMDestMem, (ops f128mem:$dst, VR128:$src),
                    [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>;
 
 let isTwoAddress = 1 in {
+let AddedComplexity = 20 in {
 def MOVLPSrm : PSI<0x12, MRMSrcMem, (ops VR128:$dst, VR128:$src1, f64mem:$src2),
                    "movlps {$src2, $dst|$dst, $src2}",
                    [(set VR128:$dst,
@@ -806,6 +801,7 @@ def MOVHPDrm : PDI<0x16, MRMSrcMem, (ops VR128:$dst, VR128:$src1, f64mem:$src2),
                      (v2f64 (vector_shuffle VR128:$src1,
                              (scalar_to_vector (loadf64 addr:$src2)),
                              MOVHP_shuffle_mask)))]>;
+} // AddedComplexity
 }
 
 def MOVLPSmr : PSI<0x13, MRMDestMem, (ops f64mem:$dst, VR128:$src),
@@ -834,17 +830,19 @@ def MOVHPDmr : PDI<0x17, MRMDestMem, (ops f64mem:$dst, VR128:$src),
                    addr:$dst)]>;
 
 let isTwoAddress = 1 in {
+let AddedComplexity = 20 in {
 def MOVLHPSrr : PSI<0x16, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
                     "movlhps {$src2, $dst|$dst, $src2}",
                     [(set VR128:$dst,
                       (v4f32 (vector_shuffle VR128:$src1, VR128:$src2,
-                              MOVLHPS_shuffle_mask)))]>;
+                              MOVHP_shuffle_mask)))]>;
 def MOVHLPSrr : PSI<0x12, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
                     "movhlps {$src2, $dst|$dst, $src2}",
                     [(set VR128:$dst,
                       (v4f32 (vector_shuffle VR128:$src1, VR128:$src2,
                               MOVHLPS_shuffle_mask)))]>;
+} // AddedComplexity
 }
 
 def MOVSHDUPrr : S3SI<0x16, MRMSrcReg, (ops VR128:$dst, VR128:$src),
@@ -852,7 +850,7 @@ def MOVSHDUPrr : S3SI<0x16, MRMSrcReg, (ops VR128:$dst, VR128:$src),
                       [(set VR128:$dst, (v4f32 (vector_shuffle
                                                 VR128:$src, (undef),
                                                 MOVSHDUP_shuffle_mask)))]>;
-def MOVSHDUPrm : S3SI<0x16, MRMSrcReg, (ops VR128:$dst, f128mem:$src),
+def MOVSHDUPrm : S3SI<0x16, MRMSrcMem, (ops VR128:$dst, f128mem:$src),
                       "movshdup {$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (v4f32 (vector_shuffle
                                                 (loadv4f32 addr:$src), (undef),
@@ -863,7 +861,7 @@ def MOVSLDUPrr : S3SI<0x12, MRMSrcReg, (ops VR128:$dst, VR128:$src),
                       [(set VR128:$dst, (v4f32 (vector_shuffle
                                                 VR128:$src, (undef),
                                                 MOVSLDUP_shuffle_mask)))]>;
-def MOVSLDUPrm : S3SI<0x12, MRMSrcReg, (ops VR128:$dst, f128mem:$src),
+def MOVSLDUPrm : S3SI<0x12, MRMSrcMem, (ops VR128:$dst, f128mem:$src),
                       "movsldup {$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (v4f32 (vector_shuffle
                                                 (loadv4f32 addr:$src), (undef),
@@ -874,10 +872,11 @@ def MOVDDUPrr : S3DI<0x12, MRMSrcReg, (ops VR128:$dst, VR128:$src),
                       [(set VR128:$dst, (v2f64 (vector_shuffle
                                                 VR128:$src, (undef),
                                                 SSE_splat_v2_mask)))]>;
-def MOVDDUPrm : S3DI<0x12, MRMSrcReg, (ops VR128:$dst, f64mem:$src),
+def MOVDDUPrm : S3DI<0x12, MRMSrcMem, (ops VR128:$dst, f64mem:$src),
                       "movddup {$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (v2f64 (vector_shuffle
-                                                (loadv2f64 addr:$src), (undef),
+                                                (scalar_to_vector (loadf64 addr:$src)),
+                                                (undef),
                                                 SSE_splat_v2_mask)))]>;
 
 // SSE2 instructions without OpSize prefix
@@ -1195,22 +1194,22 @@ def ANDNPDrm : PDI<0x55, MRMSrcMem, (ops VR128:$dst,
                    VR128:$src1,f128mem:$src2),
 }
 
 let isTwoAddress = 1 in {
-def CMPPSrr : PSIi8<0xC2, MRMSrcReg,
+def CMPPSrri : PSIi8<0xC2, MRMSrcReg,
                     (ops VR128:$dst, VR128:$src1, VR128:$src, SSECC:$cc),
                     "cmp${cc}ps {$src, $dst|$dst, $src}",
                     [(set VR128:$dst, (int_x86_sse_cmp_ps VR128:$src1,
                                        VR128:$src, imm:$cc))]>;
-def CMPPSrm : PSIi8<0xC2, MRMSrcMem,
+def CMPPSrmi : PSIi8<0xC2, MRMSrcMem,
                     (ops VR128:$dst, VR128:$src1, f128mem:$src, SSECC:$cc),
                     "cmp${cc}ps {$src, $dst|$dst, $src}",
                     [(set VR128:$dst, (int_x86_sse_cmp_ps VR128:$src1,
                                        (load addr:$src), imm:$cc))]>;
-def CMPPDrr : PDIi8<0xC2, MRMSrcReg,
+def CMPPDrri : PDIi8<0xC2, MRMSrcReg,
                     (ops VR128:$dst, VR128:$src1, VR128:$src, SSECC:$cc),
                     "cmp${cc}pd {$src, $dst|$dst, $src}",
                     [(set VR128:$dst, (int_x86_sse2_cmp_pd VR128:$src1,
                                        VR128:$src, imm:$cc))]>;
-def CMPPDrm : PDIi8<0xC2, MRMSrcMem,
+def CMPPDrmi : PDIi8<0xC2, MRMSrcMem,
                     (ops VR128:$dst, VR128:$src1, f128mem:$src, SSECC:$cc),
                     "cmp${cc}pd {$src, $dst|$dst, $src}",
                     [(set VR128:$dst, (int_x86_sse2_cmp_pd VR128:$src1,
@@ -1219,31 +1218,32 @@ def CMPPDrm : PDIi8<0xC2, MRMSrcMem,
                                        (load addr:$src), imm:$cc))]>;
 }
 
 // Shuffle and unpack instructions
 let isTwoAddress = 1 in {
-def SHUFPSrr : PSIi8<0xC6, MRMSrcReg,
+def SHUFPSrri : PSIi8<0xC6, MRMSrcReg,
                      (ops VR128:$dst, VR128:$src1, VR128:$src2, i32i8imm:$src3),
                      "shufps {$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(set VR128:$dst, (v4f32 (vector_shuffle
                                                VR128:$src1, VR128:$src2,
                                                SHUFP_shuffle_mask:$src3)))]>;
-def SHUFPSrm : PSIi8<0xC6, MRMSrcMem,
+def SHUFPSrmi : PSIi8<0xC6, MRMSrcMem,
                      (ops VR128:$dst, VR128:$src1, f128mem:$src2, i32i8imm:$src3),
                      "shufps {$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(set VR128:$dst, (v4f32 (vector_shuffle
                                                VR128:$src1, (load addr:$src2),
                                                SHUFP_shuffle_mask:$src3)))]>;
-def SHUFPDrr : PDIi8<0xC6, MRMSrcReg,
+def SHUFPDrri : PDIi8<0xC6, MRMSrcReg,
                      (ops VR128:$dst, VR128:$src1, VR128:$src2, i8imm:$src3),
                      "shufpd {$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(set VR128:$dst, (v2f64 (vector_shuffle
                                                VR128:$src1, VR128:$src2,
                                                SHUFP_shuffle_mask:$src3)))]>;
-def SHUFPDrm : PDIi8<0xC6, MRMSrcMem,
+def SHUFPDrmi : PDIi8<0xC6, MRMSrcMem,
                      (ops VR128:$dst, VR128:$src1, f128mem:$src2, i8imm:$src3),
                      "shufpd {$src3, $src2, $dst|$dst, $src2, $src3}",
                      [(set VR128:$dst, (v2f64 (vector_shuffle
                                                VR128:$src1, (load addr:$src2),
                                                SHUFP_shuffle_mask:$src3)))]>;
 
+let AddedComplexity = 10 in {
 def UNPCKHPSrr : PSI<0x15, MRMSrcReg,
                      (ops VR128:$dst, VR128:$src1, VR128:$src2),
                      "unpckhps {$src2, $dst|$dst, $src2}",
@@ -1293,6 +1293,7 @@ def UNPCKLPDrm : PDI<0x14, MRMSrcMem,
                      [(set VR128:$dst, (v2f64 (vector_shuffle
                                                VR128:$src1, (load addr:$src2),
                                                UNPCKL_shuffle_mask)))]>;
+} // AddedComplexity
 }
 
 // Horizontal ops
@@ -1359,20 +1360,20 @@ def PADDQrr : PDI<0xD4, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
 }
 def PADDBrm : PDI<0xFC, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
                   "paddb {$src2, $dst|$dst, $src2}",
-                  [(set VR128:$dst, (v16i8 (add VR128:$src1,
-                                            (load addr:$src2))))]>;
+                  [(set VR128:$dst, (add VR128:$src1,
+                                     (bc_v16i8 (loadv2i64 addr:$src2))))]>;
 def PADDWrm : PDI<0xFD, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
                   "paddw {$src2, $dst|$dst, $src2}",
-                  [(set VR128:$dst, (v8i16 (add VR128:$src1,
-                                            (load addr:$src2))))]>;
+                  [(set VR128:$dst, (add VR128:$src1,
+                                     (bc_v8i16 (loadv2i64 addr:$src2))))]>;
 def PADDDrm : PDI<0xFE, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
                   "paddd {$src2, $dst|$dst, $src2}",
-                  [(set VR128:$dst, (v4i32 (add VR128:$src1,
-                                            (load addr:$src2))))]>;
+                  [(set VR128:$dst, (add VR128:$src1,
+                                     (bc_v4i32 (loadv2i64 addr:$src2))))]>;
 def PADDQrm : PDI<0xD4, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
                   "paddd {$src2, $dst|$dst, $src2}",
-                  [(set VR128:$dst, (v2i64 (add VR128:$src1,
-                                            (load addr:$src2))))]>;
+                  [(set VR128:$dst, (add VR128:$src1,
+                                     (loadv2i64 addr:$src2)))]>;
 
 let isCommutable = 1 in {
 def PADDSBrr : PDI<0xEC, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
                    "paddsb {$src2, $dst|$dst, $src2}",
@@ -1425,20 +1426,20 @@ def PSUBQrr : PDI<0xFB, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
 
 def PSUBBrm : PDI<0xF8, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
                   "psubb {$src2, $dst|$dst, $src2}",
-                  [(set VR128:$dst, (v16i8 (sub VR128:$src1,
-                                            (load addr:$src2))))]>;
+                  [(set VR128:$dst, (sub VR128:$src1,
+                                     (bc_v16i8 (loadv2i64 addr:$src2))))]>;
 def PSUBWrm : PDI<0xF9, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
                   "psubw {$src2, $dst|$dst, $src2}",
-                  [(set VR128:$dst, (v8i16 (sub VR128:$src1,
-                                            (load addr:$src2))))]>;
+                  [(set VR128:$dst, (sub VR128:$src1,
+                                     (bc_v8i16 (loadv2i64 addr:$src2))))]>;
 def PSUBDrm : PDI<0xFA, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
                   "psubd {$src2, $dst|$dst, $src2}",
-                  [(set VR128:$dst, (v4i32 (sub VR128:$src1,
-                                            (load addr:$src2))))]>;
+                  [(set VR128:$dst, (sub VR128:$src1,
+                                     (bc_v4i32 (loadv2i64 addr:$src2))))]>;
 def PSUBQrm : PDI<0xFB, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
                   "psubd {$src2, $dst|$dst, $src2}",
-                  [(set VR128:$dst, (v2i64 (sub VR128:$src1,
-                                            (load addr:$src2))))]>;
+                  [(set VR128:$dst, (sub VR128:$src1,
+                                     (loadv2i64 addr:$src2)))]>;
 def PSUBSBrr : PDI<0xE8, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
                    "psubsb {$src2, $dst|$dst, $src2}",
@@ -1745,7 +1746,7 @@ def PCMPEQBrr : PDI<0x74, MRMSrcReg,
                     "pcmpeqb {$src2, $dst|$dst, $src2}",
                     [(set VR128:$dst, (int_x86_sse2_pcmpeq_b VR128:$src1,
                                        VR128:$src2))]>;
-def PCMPEQBrm : PDI<0x74, MRMSrcReg,
+def PCMPEQBrm : PDI<0x74, MRMSrcMem,
                     (ops VR128:$dst, VR128:$src1, i128mem:$src2),
                     "pcmpeqb {$src2, $dst|$dst, $src2}",
                     [(set VR128:$dst, (int_x86_sse2_pcmpeq_b VR128:$src1,
@@ -1755,7 +1756,7 @@ def PCMPEQWrr : PDI<0x75, MRMSrcReg,
                     "pcmpeqw {$src2, $dst|$dst, $src2}",
                     [(set VR128:$dst, (int_x86_sse2_pcmpeq_w VR128:$src1,
                                        VR128:$src2))]>;
-def PCMPEQWrm : PDI<0x75, MRMSrcReg,
+def PCMPEQWrm : PDI<0x75, MRMSrcMem,
                     (ops VR128:$dst, VR128:$src1, i128mem:$src2),
                     "pcmpeqw {$src2, $dst|$dst, $src2}",
                     [(set VR128:$dst, (int_x86_sse2_pcmpeq_w VR128:$src1,
@@ -1765,7 +1766,7 @@ def PCMPEQDrr : PDI<0x76, MRMSrcReg,
                     "pcmpeqd {$src2, $dst|$dst, $src2}",
                     [(set VR128:$dst, (int_x86_sse2_pcmpeq_d VR128:$src1,
                                        VR128:$src2))]>;
-def PCMPEQDrm : PDI<0x76, MRMSrcReg,
+def PCMPEQDrm : PDI<0x76, MRMSrcMem,
                     (ops VR128:$dst, VR128:$src1, i128mem:$src2),
                     "pcmpeqd {$src2, $dst|$dst, $src2}",
                     [(set VR128:$dst, (int_x86_sse2_pcmpeq_d VR128:$src1,
@@ -1776,7 +1777,7 @@ def PCMPGTBrr : PDI<0x64, MRMSrcReg,
                     "pcmpgtb {$src2, $dst|$dst, $src2}",
                     [(set VR128:$dst, (int_x86_sse2_pcmpgt_b VR128:$src1,
                                        VR128:$src2))]>;
-def PCMPGTBrm : PDI<0x64, MRMSrcReg,
+def PCMPGTBrm : PDI<0x64, MRMSrcMem,
                     (ops VR128:$dst, VR128:$src1, i128mem:$src2),
                     "pcmpgtb {$src2, $dst|$dst, $src2}",
                     [(set VR128:$dst, (int_x86_sse2_pcmpgt_b VR128:$src1,
@@ -1786,7 +1787,7 @@ def PCMPGTWrr : PDI<0x65, MRMSrcReg,
                     "pcmpgtw {$src2, $dst|$dst, $src2}",
                     [(set VR128:$dst, (int_x86_sse2_pcmpgt_w VR128:$src1,
                                        VR128:$src2))]>;
-def PCMPGTWrm : PDI<0x65, MRMSrcReg,
+def PCMPGTWrm : PDI<0x65, MRMSrcMem,
                     (ops VR128:$dst, VR128:$src1, i128mem:$src2),
                     "pcmpgtw {$src2, $dst|$dst, $src2}",
                     [(set VR128:$dst, (int_x86_sse2_pcmpgt_w VR128:$src1,
@@ -1796,7 +1797,7 @@ def PCMPGTDrr : PDI<0x66, MRMSrcReg,
                     "pcmpgtd {$src2, $dst|$dst, $src2}",
                     [(set VR128:$dst, (int_x86_sse2_pcmpgt_d VR128:$src1,
                                        VR128:$src2))]>;
-def PCMPGTDrm : PDI<0x66, MRMSrcReg,
+def PCMPGTDrm : PDI<0x66, MRMSrcMem,
                     (ops VR128:$dst, VR128:$src1, i128mem:$src2),
                     "pcmpgtd {$src2, $dst|$dst, $src2}",
                     [(set VR128:$dst, (int_x86_sse2_pcmpgt_d VR128:$src1,
@@ -1823,7 +1824,7 @@ def PACKSSDWrr : PDI<0x6B, MRMSrcReg, (ops VR128:$dst, VR128:$src1,
                      [(set VR128:$dst, (v4i32 (int_x86_sse2_packssdw_128
                                                VR128:$src1,
                                                VR128:$src2)))]>;
-def PACKSSDWrm : PDI<0x6B, MRMSrcReg, (ops VR128:$dst, VR128:$src1,
+def PACKSSDWrm : PDI<0x6B, MRMSrcMem, (ops VR128:$dst, VR128:$src1,
                                        i128mem:$src2),
                      "packssdw {$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst, (v4i32 (int_x86_sse2_packssdw_128
@@ -1835,7 +1836,7 @@ def PACKUSWBrr : PDI<0x67, MRMSrcReg, (ops VR128:$dst, VR128:$src1,
                      [(set VR128:$dst, (v8i16 (int_x86_sse2_packuswb_128
                                                VR128:$src1,
                                                VR128:$src2)))]>;
-def PACKUSWBrm : PDI<0x67, MRMSrcReg, (ops VR128:$dst, VR128:$src1,
+def PACKUSWBrm : PDI<0x67, MRMSrcMem, (ops VR128:$dst, VR128:$src1,
                                        i128mem:$src2),
                      "packuswb {$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst, (v8i16 (int_x86_sse2_packuswb_128
@@ -1987,7 +1988,7 @@ def PUNPCKHDQrm : PDI<0x6A, MRMSrcMem,
                                        UNPCKH_shuffle_mask)))]>;
 def PUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
                        (ops VR128:$dst, VR128:$src1, VR128:$src2),
-                       "punpckhdq {$src2, $dst|$dst, $src2}",
+                       "punpckhqdq {$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst,
                          (v2i64 (vector_shuffle VR128:$src1, VR128:$src2,
                                  UNPCKH_shuffle_mask)))]>;
@@ -2006,13 +2007,6 @@ def PEXTRWri : PDIi8<0xC5, MRMSrcReg,
                      "pextrw {$src2, $src1, $dst|$dst, $src1, $src2}",
                      [(set R32:$dst, (X86pextrw (v8i16 VR128:$src1),
                                      (i32 imm:$src2)))]>;
-def PEXTRWmi : PDIi8<0xC5, MRMSrcMem,
-                     (ops R32:$dst, i128mem:$src1, i32i8imm:$src2),
-                     "pextrw {$src2, $src1, $dst|$dst, $src1, $src2}",
-                     [(set R32:$dst, (X86pextrw
-                                      (bc_v8i16 (loadv2i64 addr:$src1)),
-                                      (i32 imm:$src2)))]>;
-
 let isTwoAddress = 1 in {
 def PINSRWrri : PDIi8<0xC4, MRMSrcReg,
                       (ops VR128:$dst, VR128:$src1, R32:$src2, i32i8imm:$src3),
@@ -2178,7 +2172,11 @@ def MOVPD2SDrr : SDI<0x10, MRMSrcReg, (ops FR64:$dst, VR128:$src),
                      "movsd {$src, $dst|$dst, $src}",
                      [(set FR64:$dst, (vector_extract (v2f64 VR128:$src),
                                        (i32 0)))]>;
-def MOVPDI2DIrr : PDI<0x6E, MRMSrcReg, (ops R32:$dst, VR128:$src),
+def MOVPD2SDmr : SDI<0x11, MRMDestMem, (ops f64mem:$dst, VR128:$src),
+                     "movsd {$src, $dst|$dst, $src}",
+                     [(store (f64 (vector_extract (v2f64 VR128:$src),
+                                   (i32 0))), addr:$dst)]>;
+def MOVPDI2DIrr : PDI<0x7E, MRMDestReg, (ops R32:$dst, VR128:$src),
                       "movd {$src, $dst|$dst, $src}",
                       [(set R32:$dst, (vector_extract (v4i32 VR128:$src),
                                        (i32 0)))]>;
@@ -2194,19 +2192,19 @@ def MOVLSS2PSrr : SSI<0x10, MRMSrcReg, (ops VR128:$dst, VR128:$src1, FR32:$src2)
                       "movss {$src2, $dst|$dst, $src2}", []>;
 def MOVLSD2PDrr : SDI<0x10, MRMSrcReg, (ops VR128:$dst, VR128:$src1, FR64:$src2),
                       "movsd {$src2, $dst|$dst, $src2}", []>;
-def MOVLDI2PDIrr : PDI<0x6E, MRMSrcReg, (ops VR128:$dst, VR128:$src1, R32:$src2),
-                       "movd {$src2, $dst|$dst, $src2}", []>;
 
+let AddedComplexity = 20 in {
 def MOVLPSrr : SSI<0x10, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
                    "movss {$src2, $dst|$dst, $src2}",
                    [(set VR128:$dst,
                      (v4f32 (vector_shuffle VR128:$src1, VR128:$src2,
-                             MOVS_shuffle_mask)))]>;
+                             MOVL_shuffle_mask)))]>;
 def MOVLPDrr : SDI<0x10, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
                    "movsd {$src2, $dst|$dst, $src2}",
                    [(set VR128:$dst,
                      (v2f64 (vector_shuffle VR128:$src1, VR128:$src2,
-                             MOVS_shuffle_mask)))]>;
+                             MOVL_shuffle_mask)))]>;
+}
 }
 
 // Store / copy lower 64-bits of a XMM register.
@@ -2214,30 +2212,41 @@ def MOVLQ128mr : PDI<0xD6, MRMDestMem, (ops i64mem:$dst, VR128:$src),
                      "movq {$src, $dst|$dst, $src}",
                      [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>;
 
-// FIXME: Temporary workaround since 2-wide shuffle is broken.
-def MOVLQ128rr : PDI<0xD6, MRMSrcReg, (ops VR128:$dst, VR128:$src),
-                     "movq {$src, $dst|$dst, $src}",
-                     [(set VR128:$dst, (int_x86_sse2_movl_dq VR128:$src))]>;
-
 // Move to lower bits of a VR128 and zeroing upper bits.
 // Loading from memory automatically zeroing upper bits.
+let AddedComplexity = 20 in {
 def MOVZSS2PSrm : SSI<0x10, MRMSrcMem, (ops VR128:$dst, f32mem:$src),
                       "movss {$src, $dst|$dst, $src}",
-                      [(set VR128:$dst,
-                        (v4f32 (X86zexts2vec (loadf32 addr:$src))))]>;
+                      [(set VR128:$dst, (v4f32 (vector_shuffle immAllZerosV,
+                                 (v4f32 (scalar_to_vector (loadf32 addr:$src))),
+                                 MOVL_shuffle_mask)))]>;
 def MOVZSD2PDrm : SDI<0x10, MRMSrcMem, (ops VR128:$dst, f64mem:$src),
                       "movsd {$src, $dst|$dst, $src}",
-                      [(set VR128:$dst,
-                        (v2f64 (X86zexts2vec (loadf64 addr:$src))))]>;
+                      [(set VR128:$dst, (v2f64 (vector_shuffle immAllZerosV,
+                                 (v2f64 (scalar_to_vector (loadf64 addr:$src))),
+                                 MOVL_shuffle_mask)))]>;
+// movd / movq to XMM register zero-extends
+def MOVZDI2PDIrr : PDI<0x6E, MRMSrcReg, (ops VR128:$dst, R32:$src),
+                       "movd {$src, $dst|$dst, $src}",
+                       [(set VR128:$dst, (v4i32 (vector_shuffle immAllZerosV,
+                                  (v4i32 (scalar_to_vector R32:$src)),
+                                  MOVL_shuffle_mask)))]>;
 def MOVZDI2PDIrm : PDI<0x6E, MRMSrcMem, (ops VR128:$dst, i32mem:$src),
                        "movd {$src, $dst|$dst, $src}",
-                       [(set VR128:$dst,
-                         (v4i32 (X86zexts2vec (loadi32 addr:$src))))]>;
-def MOVZQI2PQIrm : PDI<0x7E, MRMSrcMem, (ops VR128:$dst, i64mem:$src),
-                       "movq {$src, $dst|$dst, $src}",
-                       [(set VR128:$dst,
-                         (bc_v2i64 (v2f64 (X86zexts2vec
-                                           (loadf64 addr:$src)))))]>;
+                       [(set VR128:$dst, (v4i32 (vector_shuffle immAllZerosV,
+                                  (v4i32 (scalar_to_vector (loadi32 addr:$src))),
+                                  MOVL_shuffle_mask)))]>;
+// Moving from XMM to XMM but still clear upper 64 bits.
+def MOVZQI2PQIrr : I<0x7E, MRMSrcReg, (ops VR128:$dst, VR128:$src),
+                     "movq {$src, $dst|$dst, $src}",
+                     [(set VR128:$dst, (int_x86_sse2_movl_dq VR128:$src))]>,
+                   XS, Requires<[HasSSE2]>;
+def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (ops VR128:$dst, i64mem:$src),
+                     "movq {$src, $dst|$dst, $src}",
+                     [(set VR128:$dst, (int_x86_sse2_movl_dq
+                                        (bc_v4i32 (loadv2i64 addr:$src))))]>,
+                   XS, Requires<[HasSSE2]>;
+}
 
 //===----------------------------------------------------------------------===//
 // Non-Instruction Patterns
 //===----------------------------------------------------------------------===//
@@ -2272,9 +2281,9 @@ def : Pat<(store (v4i32 VR128:$src), addr:$dst),
 
 // Scalar to v8i16 / v16i8. The source may be a R32, but only the lower 8 or
 // 16-bits matter.
-def : Pat<(v8i16 (X86s2vec R32:$src)), (MOVDI2PDIrr R32:$src)>,
+def : Pat<(v8i16 (X86s2vec R32:$src)), (v8i16 (MOVDI2PDIrr R32:$src))>,
       Requires<[HasSSE2]>;
-def : Pat<(v16i8 (X86s2vec R32:$src)), (MOVDI2PDIrr R32:$src)>,
+def : Pat<(v16i8 (X86s2vec R32:$src)), (v16i8 (MOVDI2PDIrr R32:$src))>,
       Requires<[HasSSE2]>;
 
 // bit_convert
@@ -2339,34 +2348,42 @@ def : Pat<(v2f64 (bitconvert (v16i8 VR128:$src))), (v2f64 VR128:$src)>,
 def : Pat<(v2f64 (bitconvert (v4f32 VR128:$src))), (v2f64 VR128:$src)>,
       Requires<[HasSSE2]>;
 
-// Zeroing a VR128 then do a MOVS* to the lower bits.
-def : Pat<(v2f64 (X86zexts2vec FR64:$src)),
-          (MOVLSD2PDrr (V_SET0_PD), FR64:$src)>, Requires<[HasSSE2]>;
-def : Pat<(v4f32 (X86zexts2vec FR32:$src)),
-          (MOVLSS2PSrr (V_SET0_PS), FR32:$src)>, Requires<[HasSSE2]>;
-def : Pat<(v4i32 (X86zexts2vec R32:$src)),
-          (MOVLDI2PDIrr (V_SET0_PI), R32:$src)>, Requires<[HasSSE2]>;
-def : Pat<(v8i16 (X86zexts2vec R16:$src)),
-          (MOVLDI2PDIrr (V_SET0_PI), (MOVZX32rr16 R16:$src))>, Requires<[HasSSE2]>;
-def : Pat<(v16i8 (X86zexts2vec R8:$src)),
-          (MOVLDI2PDIrr (V_SET0_PI), (MOVZX32rr8 R8:$src))>, Requires<[HasSSE2]>;
+// Move scalar to XMM zero-extended
+// movd to XMM register zero-extends
+let AddedComplexity = 20 in {
+def : Pat<(v8i16 (vector_shuffle immAllZerosV,
+                  (v8i16 (X86s2vec R32:$src)), MOVL_shuffle_mask)),
+          (v8i16 (MOVZDI2PDIrr R32:$src))>, Requires<[HasSSE2]>;
+def : Pat<(v16i8 (vector_shuffle immAllZerosV,
+                  (v16i8 (X86s2vec R32:$src)), MOVL_shuffle_mask)),
+          (v16i8 (MOVZDI2PDIrr R32:$src))>, Requires<[HasSSE2]>;
+// Zeroing a VR128 then do a MOVS{S|D} to the lower bits.
+def : Pat<(v2f64 (vector_shuffle immAllZerosV,
+                  (v2f64 (scalar_to_vector FR64:$src)), MOVL_shuffle_mask)),
+          (v2f64 (MOVLSD2PDrr (V_SET0_PD), FR64:$src))>, Requires<[HasSSE2]>;
+def : Pat<(v4f32 (vector_shuffle immAllZerosV,
+                  (v4f32 (scalar_to_vector FR32:$src)), MOVL_shuffle_mask)),
+          (v4f32 (MOVLSS2PSrr (V_SET0_PS), FR32:$src))>, Requires<[HasSSE2]>;
+}
 
 // Splat v2f64 / v2i64
+let AddedComplexity = 10 in {
 def : Pat<(vector_shuffle (v2f64 VR128:$src), (undef), SSE_splat_v2_mask:$sm),
           (v2f64 (UNPCKLPDrr VR128:$src, VR128:$src))>,   Requires<[HasSSE2]>;
 def : Pat<(vector_shuffle (v2i64 VR128:$src), (undef), SSE_splat_v2_mask:$sm),
           (v2i64 (PUNPCKLQDQrr VR128:$src, VR128:$src))>, Requires<[HasSSE2]>;
+}
 
 // Splat v4f32
 def : Pat<(vector_shuffle (v4f32 VR128:$src), (undef), SSE_splat_mask:$sm),
-          (v4f32 (SHUFPSrr VR128:$src, VR128:$src, SSE_splat_mask:$sm))>,
+          (v4f32 (SHUFPSrri VR128:$src, VR128:$src, SSE_splat_mask:$sm))>,
       Requires<[HasSSE1]>;
 
-// Special unary SHUFPSrr case.
+// Special unary SHUFPSrri case.
 // FIXME: when we want non two-address code, then we should use PSHUFD?
 def : Pat<(vector_shuffle (v4f32 VR128:$src1), (undef),
            SHUFP_unary_shuffle_mask:$sm),
-          (v4f32 (SHUFPSrr VR128:$src1, VR128:$src1, SHUFP_unary_shuffle_mask:$sm))>,
+          (v4f32 (SHUFPSrri VR128:$src1, VR128:$src1, SHUFP_unary_shuffle_mask:$sm))>,
      Requires<[HasSSE1]>;
 // Unary v4f32 shuffle with PSHUF* in order to fold a load.
 def : Pat<(vector_shuffle (loadv4f32 addr:$src1), (undef),
@@ -2376,42 +2393,138 @@ def : Pat<(vector_shuffle (loadv4f32 addr:$src1), (undef),
 
 // Special binary v4i32 shuffle cases with SHUFPS.
 def : Pat<(vector_shuffle (v4i32 VR128:$src1), (v4i32 VR128:$src2),
            PSHUFD_binary_shuffle_mask:$sm),
-          (v4i32 (SHUFPSrr VR128:$src1, VR128:$src2,
+          (v4i32 (SHUFPSrri VR128:$src1, VR128:$src2,
                   PSHUFD_binary_shuffle_mask:$sm))>, Requires<[HasSSE2]>;
 def : Pat<(vector_shuffle (v4i32 VR128:$src1),
            (bc_v4i32 (loadv2i64 addr:$src2)), PSHUFD_binary_shuffle_mask:$sm),
-          (v4i32 (SHUFPSrm VR128:$src1, addr:$src2,
+          (v4i32 (SHUFPSrmi VR128:$src1, addr:$src2,
                   PSHUFD_binary_shuffle_mask:$sm))>, Requires<[HasSSE2]>;
 
 // vector_shuffle v1, <undef>, <0, 0, 1, 1, ...>
+let AddedComplexity = 10 in {
 def : Pat<(v4f32 (vector_shuffle VR128:$src, (undef),
            UNPCKL_v_undef_shuffle_mask)),
-          (UNPCKLPSrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
+          (v4f32 (UNPCKLPSrr VR128:$src, VR128:$src))>, Requires<[HasSSE2]>;
 def : Pat<(v16i8 (vector_shuffle VR128:$src, (undef),
           UNPCKL_v_undef_shuffle_mask)),
-          (PUNPCKLBWrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
+          (v16i8 (PUNPCKLBWrr VR128:$src, VR128:$src))>, Requires<[HasSSE2]>;
 def : Pat<(v8i16 (vector_shuffle VR128:$src, (undef),
            UNPCKL_v_undef_shuffle_mask)),
-          (PUNPCKLWDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
+          (v8i16 (PUNPCKLWDrr VR128:$src, VR128:$src))>, Requires<[HasSSE2]>;
 def : Pat<(v4i32 (vector_shuffle VR128:$src, (undef),
            UNPCKL_v_undef_shuffle_mask)),
-          (PUNPCKLDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE1]>;
+          (v4i32 (PUNPCKLDQrr VR128:$src, VR128:$src))>, Requires<[HasSSE1]>;
+}
 
+let AddedComplexity = 20 in {
 // vector_shuffle v1, <undef> <1, 1, 3, 3>
 def : Pat<(v4i32 (vector_shuffle VR128:$src, (undef),
            MOVSHDUP_shuffle_mask)),
-          (MOVSHDUPrr VR128:$src)>, Requires<[HasSSE3]>;
+          (v4i32 (MOVSHDUPrr VR128:$src))>, Requires<[HasSSE3]>;
 def : Pat<(v4i32 (vector_shuffle (bc_v4i32 (loadv2i64 addr:$src)), (undef),
            MOVSHDUP_shuffle_mask)),
-          (MOVSHDUPrm addr:$src)>, Requires<[HasSSE3]>;
+          (v4i32 (MOVSHDUPrm addr:$src))>, Requires<[HasSSE3]>;
 
 // vector_shuffle v1, <undef> <0, 0, 2, 2>
 def : Pat<(v4i32 (vector_shuffle VR128:$src, (undef),
           MOVSLDUP_shuffle_mask)),
-          (MOVSLDUPrr VR128:$src)>, Requires<[HasSSE3]>;
+          (v4i32 (MOVSLDUPrr VR128:$src))>, Requires<[HasSSE3]>;
 def : Pat<(v4i32 (vector_shuffle (bc_v4i32 (loadv2i64 addr:$src)), (undef),
            MOVSLDUP_shuffle_mask)),
-          (MOVSLDUPrm addr:$src)>, Requires<[HasSSE3]>;
+          (v4i32 (MOVSLDUPrm addr:$src))>, Requires<[HasSSE3]>;
+}
+
+let AddedComplexity = 20 in {
+// vector_shuffle v1, v2 <0, 1, 4, 5> using MOVLHPS
+def : Pat<(v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
+                  MOVHP_shuffle_mask)),
+          (v4i32 (MOVLHPSrr VR128:$src1, VR128:$src2))>;
+
+// vector_shuffle v1, v2 <6, 7, 2, 3> using MOVHLPS
+def : Pat<(v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
+                  MOVHLPS_shuffle_mask)),
+          (v4i32 (MOVHLPSrr VR128:$src1, VR128:$src2))>;
+
+// vector_shuffle v1, (load v2) <4, 5, 2, 3> using MOVLPS
+// vector_shuffle v1, (load v2) <0, 1, 4, 5> using MOVHPS
+def : Pat<(v4f32 (vector_shuffle VR128:$src1, (loadv4f32 addr:$src2),
+                  MOVLP_shuffle_mask)),
+          (v4f32 (MOVLPSrm VR128:$src1, addr:$src2))>, Requires<[HasSSE1]>;
+def : Pat<(v2f64 (vector_shuffle VR128:$src1, (loadv2f64 addr:$src2),
+                  MOVLP_shuffle_mask)),
+          (v2f64 (MOVLPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
+def : Pat<(v4f32 (vector_shuffle VR128:$src1, (loadv4f32 addr:$src2),
+                  MOVHP_shuffle_mask)),
+          (v4f32 (MOVHPSrm VR128:$src1, addr:$src2))>, Requires<[HasSSE1]>;
+def : Pat<(v2f64 (vector_shuffle VR128:$src1, (loadv2f64 addr:$src2),
+                  MOVHP_shuffle_mask)),
+          (v2f64 (MOVHPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
+
+def : Pat<(v4i32 (vector_shuffle VR128:$src1, (bc_v4i32 (loadv2i64 addr:$src2)),
+                  MOVLP_shuffle_mask)),
+          (v4i32 (MOVLPSrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
+def : Pat<(v2i64 (vector_shuffle VR128:$src1, (loadv2i64 addr:$src2),
+                  MOVLP_shuffle_mask)),
+          (v2i64 (MOVLPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
+def : Pat<(v4i32 (vector_shuffle VR128:$src1, (bc_v4i32 (loadv2i64 addr:$src2)),
+                  MOVHP_shuffle_mask)),
+          (v4i32 (MOVHPSrm VR128:$src1, addr:$src2))>, Requires<[HasSSE1]>;
+def : Pat<(v2i64 (vector_shuffle VR128:$src1, (loadv2i64 addr:$src2),
+                  MOVLP_shuffle_mask)),
+          (v2i64 (MOVLPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
+
+// Setting the lowest element in the vector.
+def : Pat<(v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
+                  MOVL_shuffle_mask)),
+          (v4i32 (MOVLPSrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
+def : Pat<(v2i64 (vector_shuffle VR128:$src1, VR128:$src2,
+                  MOVL_shuffle_mask)),
+          (v2i64 (MOVLPDrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
+
+// vector_shuffle v1, v2 <4, 5, 2, 3> using MOVLPDrr (movsd)
+def : Pat<(v4f32 (vector_shuffle VR128:$src1, VR128:$src2,
+                  MOVLP_shuffle_mask)),
+          (v4f32 (MOVLPDrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
+def : Pat<(v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
+                  MOVLP_shuffle_mask)),
+          (v4i32 (MOVLPDrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
+
+// Set lowest element and zero upper elements.
+def : Pat<(bc_v2i64 (vector_shuffle immAllZerosV,
+                     (v2f64 (scalar_to_vector (loadf64 addr:$src))),
+                     MOVL_shuffle_mask)),
+          (v2i64 (MOVZQI2PQIrm addr:$src))>, Requires<[HasSSE2]>;
+}
+
+// FIXME: Temporary workaround since 2-wide shuffle is broken.
+def : Pat<(int_x86_sse2_movs_d VR128:$src1, VR128:$src2),
+          (v2f64 (MOVLPDrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
+def : Pat<(int_x86_sse2_loadh_pd VR128:$src1, addr:$src2),
+          (v2f64 (MOVHPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
+def : Pat<(int_x86_sse2_loadl_pd VR128:$src1, addr:$src2),
+          (v2f64 (MOVLPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
+def : Pat<(int_x86_sse2_shuf_pd VR128:$src1, VR128:$src2, imm:$src3),
+          (v2f64 (SHUFPDrri VR128:$src1, VR128:$src2, imm:$src3))>,
+      Requires<[HasSSE2]>;
+def : Pat<(int_x86_sse2_shuf_pd VR128:$src1, (load addr:$src2), imm:$src3),
+          (v2f64 (SHUFPDrmi VR128:$src1, addr:$src2, imm:$src3))>,
+      Requires<[HasSSE2]>;
+def : Pat<(int_x86_sse2_unpckh_pd VR128:$src1, VR128:$src2),
+          (v2f64 (UNPCKHPDrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
+def : Pat<(int_x86_sse2_unpckh_pd VR128:$src1, (load addr:$src2)),
+          (v2f64 (UNPCKHPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
+def : Pat<(int_x86_sse2_unpckl_pd VR128:$src1, VR128:$src2),
+          (v2f64 (UNPCKLPDrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
+def : Pat<(int_x86_sse2_unpckl_pd VR128:$src1, (load addr:$src2)),
+          (v2f64 (UNPCKLPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
+def : Pat<(int_x86_sse2_punpckh_qdq VR128:$src1, VR128:$src2),
+          (v2i64 (PUNPCKHQDQrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
+def : Pat<(int_x86_sse2_punpckh_qdq VR128:$src1, (load addr:$src2)),
+          (v2i64 (PUNPCKHQDQrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
+def : Pat<(int_x86_sse2_punpckl_qdq VR128:$src1, VR128:$src2),
+          (v2i64 (PUNPCKLQDQrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
+def : Pat<(int_x86_sse2_punpckl_qdq VR128:$src1, (load addr:$src2)),
+          (PUNPCKLQDQrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
 
 // 128-bit logical shifts
 def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
@@ -2424,20 +2537,20 @@ def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
 
 // Some special case pandn patterns.
 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v4i32 immAllOnesV))),
                   VR128:$src2)),
-          (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
+          (v2i64 (PANDNrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v8i16 immAllOnesV))),
                   VR128:$src2)),
-          (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
+          (v2i64 (PANDNrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v16i8 immAllOnesV))),
                   VR128:$src2)),
-          (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
+          (v2i64 (PANDNrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v4i32 immAllOnesV))),
                   (load addr:$src2))),
-          (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
+          (v2i64 (PANDNrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v8i16 immAllOnesV))),
                   (load addr:$src2))),
-          (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
+          (v2i64 (PANDNrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v16i8 immAllOnesV))),
                   (load addr:$src2))),
-          (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
+          (v2i64 (PANDNrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
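
The patch above leans on one TableGen idiom throughout: a PatLeaf wraps a C++ predicate that decides whether a build_vector shuffle mask is encodable by a given instruction, a standalone `def : Pat<...>` rewrites the matched SelectionDAG subtree onto an already-defined instruction, and `AddedComplexity` biases instruction selection toward the more specific pattern. The following is a minimal sketch of that pairing, not code from the patch: `MYMASK_shuffle_mask`, `X86::isMyMask`, and `MYSHUFrr` are hypothetical placeholder names.

// Hypothetical PatLeaf: accepts only shuffle masks the target instruction
// can encode. X86::isMyMask is an assumed C++ helper, not part of this patch.
def MYMASK_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMyMask(N);
}]>;

// Hypothetical selection pattern: rewrite a matching v4f32 shuffle onto the
// (assumed, already-defined) MYSHUFrr instruction. The explicit (v4f32 ...)
// casts pin the pattern to one vector type, mirroring the annotations this
// patch adds to the existing Pat<> definitions, and AddedComplexity makes the
// selector prefer this form over more generic shuffle patterns.
let AddedComplexity = 20 in
def : Pat<(v4f32 (vector_shuffle VR128:$src1, VR128:$src2,
                  MYMASK_shuffle_mask)),
          (v4f32 (MYSHUFrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE1]>;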