[SDNPOutFlag]>;
def X86s2vec : SDNode<"X86ISD::S2VEC",
SDTypeProfile<1, 1, []>, []>;
-def X86zexts2vec : SDNode<"X86ISD::ZEXT_S2VEC",
- SDTypeProfile<1, 1, []>, []>;
def X86pextrw : SDNode<"X86ISD::PEXTRW",
SDTypeProfile<1, 2, []>, []>;
def X86pinsrw : SDNode<"X86ISD::PINSRW",
return X86::isSplatMask(N);
}]>;
-def MOVLHPS_shuffle_mask : PatLeaf<(build_vector), [{
- return X86::isMOVLHPSMask(N);
-}]>;
-
def MOVHLPS_shuffle_mask : PatLeaf<(build_vector), [{
return X86::isMOVHLPSMask(N);
}]>;
return X86::isMOVLPMask(N);
}]>;
-def MOVS_shuffle_mask : PatLeaf<(build_vector), [{
- return X86::isMOVSMask(N);
+def MOVL_shuffle_mask : PatLeaf<(build_vector), [{
+ return X86::isMOVLMask(N);
}]>;
def MOVSHDUP_shuffle_mask : PatLeaf<(build_vector), [{
// PDIi8 - SSE2 instructions with ImmT == Imm8 and TB and OpSize prefixes.
// S3I - SSE3 instructions with TB and OpSize prefixes.
// S3SI - SSE3 instructions with XS prefix.
-// S3SI - SSE3 instructions with XD prefix.
+// S3DI - SSE3 instructions with XD prefix.
class SSI<bits<8> o, Format F, dag ops, string asm, list<dag> pattern>
: I<o, F, ops, asm, pattern>, XS, Requires<[HasSSE1]>;
class SDI<bits<8> o, Format F, dag ops, string asm, list<dag> pattern>
Requires<[HasSSE2]>;
def CVTSS2SDrm: I<0x5A, MRMSrcMem, (ops FR64:$dst, f32mem:$src),
"cvtss2sd {$src, $dst|$dst, $src}",
- [(set FR64:$dst, (fextend (loadf32 addr:$src)))]>, XS,
+ [(set FR64:$dst, (extload addr:$src, f32))]>, XS,
Requires<[HasSSE2]>;
// Match intrinsics which expect XMM operand(s).
def MOVUPSrr : PSI<0x10, MRMSrcReg, (ops VR128:$dst, VR128:$src),
"movups {$src, $dst|$dst, $src}", []>;
-def MOVUPSrm : PDI<0x10, MRMSrcMem, (ops VR128:$dst, f128mem:$src),
+def MOVUPSrm : PSI<0x10, MRMSrcMem, (ops VR128:$dst, f128mem:$src),
"movups {$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse_loadu_ps addr:$src))]>;
-def MOVUPSmr : PDI<0x11, MRMDestMem, (ops f128mem:$dst, VR128:$src),
+def MOVUPSmr : PSI<0x11, MRMDestMem, (ops f128mem:$dst, VR128:$src),
"movups {$src, $dst|$dst, $src}",
[(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>;
def MOVUPDrr : PDI<0x10, MRMSrcReg, (ops VR128:$dst, VR128:$src),
[(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>;
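// For reference, a minimal C sketch of the unaligned moves these
// intrinsic patterns select (pointer names are illustrative):
//
//   #include <xmmintrin.h>   /* SSE: _mm_loadu_ps / _mm_storeu_ps  */
//   #include <emmintrin.h>   /* SSE2: _mm_loadu_pd / _mm_storeu_pd */
//   void copy_unaligned(float *pf, double *pd) {
//     __m128  a = _mm_loadu_ps(pf);   /* movups load (MOVUPSrm)  */
//     _mm_storeu_ps(pf, a);           /* movups store (MOVUPSmr) */
//     __m128d b = _mm_loadu_pd(pd);   /* movupd load             */
//     _mm_storeu_pd(pd, b);           /* movupd store            */
//   }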
let isTwoAddress = 1 in {
+let AddedComplexity = 20 in {
def MOVLPSrm : PSI<0x12, MRMSrcMem, (ops VR128:$dst, VR128:$src1, f64mem:$src2),
"movlps {$src2, $dst|$dst, $src2}",
[(set VR128:$dst,
(v2f64 (vector_shuffle VR128:$src1,
(scalar_to_vector (loadf64 addr:$src2)),
MOVHP_shuffle_mask)))]>;
+} // AddedComplexity
}
def MOVLPSmr : PSI<0x13, MRMDestMem, (ops f64mem:$dst, VR128:$src),
addr:$dst)]>;
let isTwoAddress = 1 in {
+let AddedComplexity = 20 in {
def MOVLHPSrr : PSI<0x16, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
"movlhps {$src2, $dst|$dst, $src2}",
[(set VR128:$dst,
(v4f32 (vector_shuffle VR128:$src1, VR128:$src2,
- MOVLHPS_shuffle_mask)))]>;
+ MOVHP_shuffle_mask)))]>;
def MOVHLPSrr : PSI<0x12, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
"movhlps {$src2, $dst|$dst, $src2}",
[(set VR128:$dst,
(v4f32 (vector_shuffle VR128:$src1, VR128:$src2,
MOVHLPS_shuffle_mask)))]>;
+} // AddedComplexity
}
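// For reference, a minimal C sketch of the two shuffles matched above
// (elements listed low to high):
//
//   #include <xmmintrin.h>
//   __m128 lh(__m128 a, __m128 b) {
//     return _mm_movelh_ps(a, b);   /* movlhps: <a0, a1, b0, b1>, mask <0, 1, 4, 5> */
//   }
//   __m128 hl(__m128 a, __m128 b) {
//     return _mm_movehl_ps(a, b);   /* movhlps: <b2, b3, a2, a3>, mask <6, 7, 2, 3> */
//   }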
def MOVSHDUPrr : S3SI<0x16, MRMSrcReg, (ops VR128:$dst, VR128:$src),
[(set VR128:$dst, (v4f32 (vector_shuffle
VR128:$src, (undef),
MOVSHDUP_shuffle_mask)))]>;
-def MOVSHDUPrm : S3SI<0x16, MRMSrcReg, (ops VR128:$dst, f128mem:$src),
+def MOVSHDUPrm : S3SI<0x16, MRMSrcMem, (ops VR128:$dst, f128mem:$src),
"movshdup {$src, $dst|$dst, $src}",
[(set VR128:$dst, (v4f32 (vector_shuffle
(loadv4f32 addr:$src), (undef),
[(set VR128:$dst, (v4f32 (vector_shuffle
VR128:$src, (undef),
MOVSLDUP_shuffle_mask)))]>;
-def MOVSLDUPrm : S3SI<0x12, MRMSrcReg, (ops VR128:$dst, f128mem:$src),
+def MOVSLDUPrm : S3SI<0x12, MRMSrcMem, (ops VR128:$dst, f128mem:$src),
"movsldup {$src, $dst|$dst, $src}",
[(set VR128:$dst, (v4f32 (vector_shuffle
(loadv4f32 addr:$src), (undef),
[(set VR128:$dst, (v2f64 (vector_shuffle
VR128:$src, (undef),
SSE_splat_v2_mask)))]>;
-def MOVDDUPrm : S3DI<0x12, MRMSrcReg, (ops VR128:$dst, f64mem:$src),
+def MOVDDUPrm : S3DI<0x12, MRMSrcMem, (ops VR128:$dst, f64mem:$src),
"movddup {$src, $dst|$dst, $src}",
[(set VR128:$dst, (v2f64 (vector_shuffle
- (loadv2f64 addr:$src), (undef),
+ (scalar_to_vector (loadf64 addr:$src)),
+ (undef),
SSE_splat_v2_mask)))]>;
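// For reference, a minimal C sketch of the SSE3 duplicate shuffles
// (helper name and pointers are illustrative):
//
//   #include <pmmintrin.h>
//   __m128 dups(__m128 a, const double *p, __m128d *out) {
//     *out = _mm_loaddup_pd(p);         /* movddup: <*p, *p>          */
//     __m128 hi = _mm_movehdup_ps(a);   /* movshdup: <a1, a1, a3, a3> */
//     __m128 lo = _mm_moveldup_ps(a);   /* movsldup: <a0, a0, a2, a2> */
//     return _mm_add_ps(hi, lo);
//   }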
// SSE2 instructions without OpSize prefix
}
let isTwoAddress = 1 in {
-def CMPPSrr : PSIi8<0xC2, MRMSrcReg,
+def CMPPSrri : PSIi8<0xC2, MRMSrcReg,
(ops VR128:$dst, VR128:$src1, VR128:$src, SSECC:$cc),
"cmp${cc}ps {$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse_cmp_ps VR128:$src1,
VR128:$src, imm:$cc))]>;
-def CMPPSrm : PSIi8<0xC2, MRMSrcMem,
+def CMPPSrmi : PSIi8<0xC2, MRMSrcMem,
(ops VR128:$dst, VR128:$src1, f128mem:$src, SSECC:$cc),
"cmp${cc}ps {$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse_cmp_ps VR128:$src1,
(load addr:$src), imm:$cc))]>;
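// For reference, the SSECC operand is the 3-bit cmpps immediate (0=eq,
// 1=lt, 2=le, 3=unord, 4=neq, 5=nlt, 6=nle, 7=ord) printed into the
// mnemonic. A minimal C sketch of one condition:
//
//   #include <xmmintrin.h>
//   __m128 lt_mask(__m128 a, __m128 b) {
//     return _mm_cmplt_ps(a, b);   /* cmpltps: per-element all-ones / all-zeros */
//   }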
-def CMPPDrr : PDIi8<0xC2, MRMSrcReg,
+def CMPPDrri : PDIi8<0xC2, MRMSrcReg,
(ops VR128:$dst, VR128:$src1, VR128:$src, SSECC:$cc),
"cmp${cc}pd {$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_cmp_pd VR128:$src1,
VR128:$src, imm:$cc))]>;
-def CMPPDrm : PDIi8<0xC2, MRMSrcMem,
+def CMPPDrmi : PDIi8<0xC2, MRMSrcMem,
(ops VR128:$dst, VR128:$src1, f128mem:$src, SSECC:$cc),
"cmp${cc}pd {$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_cmp_pd VR128:$src1,
// Shuffle and unpack instructions
let isTwoAddress = 1 in {
-def SHUFPSrr : PSIi8<0xC6, MRMSrcReg,
+def SHUFPSrri : PSIi8<0xC6, MRMSrcReg,
(ops VR128:$dst, VR128:$src1, VR128:$src2, i32i8imm:$src3),
"shufps {$src3, $src2, $dst|$dst, $src2, $src3}",
[(set VR128:$dst, (v4f32 (vector_shuffle
VR128:$src1, VR128:$src2,
SHUFP_shuffle_mask:$src3)))]>;
-def SHUFPSrm : PSIi8<0xC6, MRMSrcMem,
+def SHUFPSrmi : PSIi8<0xC6, MRMSrcMem,
(ops VR128:$dst, VR128:$src1, f128mem:$src2, i32i8imm:$src3),
"shufps {$src3, $src2, $dst|$dst, $src2, $src3}",
[(set VR128:$dst, (v4f32 (vector_shuffle
VR128:$src1, (load addr:$src2),
SHUFP_shuffle_mask:$src3)))]>;
-def SHUFPDrr : PDIi8<0xC6, MRMSrcReg,
+def SHUFPDrri : PDIi8<0xC6, MRMSrcReg,
(ops VR128:$dst, VR128:$src1, VR128:$src2, i8imm:$src3),
"shufpd {$src3, $src2, $dst|$dst, $src2, $src3}",
[(set VR128:$dst, (v2f64 (vector_shuffle
VR128:$src1, VR128:$src2,
SHUFP_shuffle_mask:$src3)))]>;
-def SHUFPDrm : PDIi8<0xC6, MRMSrcMem,
+def SHUFPDrmi : PDIi8<0xC6, MRMSrcMem,
(ops VR128:$dst, VR128:$src1, f128mem:$src2, i8imm:$src3),
"shufpd {$src3, $src2, $dst|$dst, $src2, $src3}",
[(set VR128:$dst, (v2f64 (vector_shuffle
VR128:$src1, (load addr:$src2),
SHUFP_shuffle_mask:$src3)))]>;
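// For reference, a minimal C sketch of the shufps immediate these
// patterns match: two elements are picked from each source, with the
// indices packed high-to-low by _MM_SHUFFLE:
//
//   #include <xmmintrin.h>
//   __m128 shuf(__m128 a, __m128 b) {
//     return _mm_shuffle_ps(a, b, _MM_SHUFFLE(2, 3, 0, 1));   /* <a1, a0, b3, b2> */
//   }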
+let AddedComplexity = 10 in {
def UNPCKHPSrr : PSI<0x15, MRMSrcReg,
(ops VR128:$dst, VR128:$src1, VR128:$src2),
"unpckhps {$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (v2f64 (vector_shuffle
VR128:$src1, (load addr:$src2),
UNPCKL_shuffle_mask)))]>;
+} // AddedComplexity
}
// Horizontal ops
int_x86_sse3_hadd_pd>;
def HADDPDrm : S3_Intrm<0x7C, "haddpd {$src2, $dst|$dst, $src2}",
int_x86_sse3_hadd_pd>;
-def HSUBPSrr : S3D_Intrr<0x7C, "hsubps {$src2, $dst|$dst, $src2}",
+def HSUBPSrr : S3D_Intrr<0x7D, "hsubps {$src2, $dst|$dst, $src2}",
int_x86_sse3_hsub_ps>;
-def HSUBPSrm : S3D_Intrm<0x7C, "hsubps {$src2, $dst|$dst, $src2}",
+def HSUBPSrm : S3D_Intrm<0x7D, "hsubps {$src2, $dst|$dst, $src2}",
int_x86_sse3_hsub_ps>;
-def HSUBPDrr : S3_Intrr<0x7C, "hsubpd {$src2, $dst|$dst, $src2}",
+def HSUBPDrr : S3_Intrr<0x7D, "hsubpd {$src2, $dst|$dst, $src2}",
int_x86_sse3_hsub_pd>;
-def HSUBPDrm : S3_Intrm<0x7C, "hsubpd {$src2, $dst|$dst, $src2}",
+def HSUBPDrm : S3_Intrm<0x7D, "hsubpd {$src2, $dst|$dst, $src2}",
int_x86_sse3_hsub_pd>;
}
}
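// For reference, a minimal C sketch of the SSE3 horizontal ops matched
// by the intrinsic patterns above:
//
//   #include <pmmintrin.h>
//   __m128 hops(__m128 a, __m128 b) {
//     __m128 s = _mm_hadd_ps(a, b);   /* haddps: <a0+a1, a2+a3, b0+b1, b2+b3> */
//     return _mm_hsub_ps(s, s);       /* hsubps: <s0-s1, s2-s3, s0-s1, s2-s3> */
//   }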
def PADDBrm : PDI<0xFC, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
"paddb {$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (v16i8 (add VR128:$src1,
- (load addr:$src2))))]>;
+ [(set VR128:$dst, (add VR128:$src1,
+ (bc_v16i8 (loadv2i64 addr:$src2))))]>;
def PADDWrm : PDI<0xFD, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
"paddw {$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (v8i16 (add VR128:$src1,
- (load addr:$src2))))]>;
+ [(set VR128:$dst, (add VR128:$src1,
+ (bc_v8i16 (loadv2i64 addr:$src2))))]>;
def PADDDrm : PDI<0xFE, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
"paddd {$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (v4i32 (add VR128:$src1,
- (load addr:$src2))))]>;
+ [(set VR128:$dst, (add VR128:$src1,
+ (bc_v4i32 (loadv2i64 addr:$src2))))]>;
def PADDQrm : PDI<0xD4, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
"paddd {$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (v2i64 (add VR128:$src1,
- (load addr:$src2))))]>;
+ [(set VR128:$dst, (add VR128:$src1,
+ (loadv2i64 addr:$src2)))]>;
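// The memory operand loads as the canonical v2i64 and is bitcast
// (bc_v16i8 / bc_v8i16 / bc_v4i32) to the add's element type. A minimal
// C sketch of the byte case (helper name illustrative):
//
//   #include <emmintrin.h>
//   void accum(__m128i *acc, const __m128i *p) {
//     *acc = _mm_add_epi8(*acc, _mm_load_si128(p));   /* paddb, load folded */
//   }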
let isCommutable = 1 in {
def PADDSBrr : PDI<0xEC, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
def PSUBBrm : PDI<0xF8, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
"psubb {$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (v16i8 (sub VR128:$src1,
- (load addr:$src2))))]>;
+ [(set VR128:$dst, (sub VR128:$src1,
+ (bc_v16i8 (loadv2i64 addr:$src2))))]>;
def PSUBWrm : PDI<0xF9, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
"psubw {$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (v8i16 (sub VR128:$src1,
- (load addr:$src2))))]>;
+ [(set VR128:$dst, (sub VR128:$src1,
+ (bc_v8i16 (loadv2i64 addr:$src2))))]>;
def PSUBDrm : PDI<0xFA, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
"psubd {$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (v4i32 (sub VR128:$src1,
- (load addr:$src2))))]>;
+ [(set VR128:$dst, (sub VR128:$src1,
+ (bc_v4i32 (loadv2i64 addr:$src2))))]>;
def PSUBQrm : PDI<0xFB, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
"psubd {$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (v2i64 (sub VR128:$src1,
- (load addr:$src2))))]>;
+ [(set VR128:$dst, (sub VR128:$src1,
+ (loadv2i64 addr:$src2)))]>;
def PSUBSBrr : PDI<0xE8, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
"psubsb {$src2, $dst|$dst, $src2}",
"psllw {$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (int_x86_sse2_psll_w VR128:$src1,
VR128:$src2))]>;
-def PSLLWrm : PDIi8<0xF1, MRMSrcReg, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
+def PSLLWrm : PDIi8<0xF1, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
"psllw {$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (int_x86_sse2_psll_w VR128:$src1,
(bc_v4i32 (loadv2i64 addr:$src2))))]>;
"pslld {$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (int_x86_sse2_psll_d VR128:$src1,
VR128:$src2))]>;
-def PSLLDrm : PDIi8<0xF2, MRMSrcReg, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
+def PSLLDrm : PDIi8<0xF2, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
"pslld {$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (int_x86_sse2_psll_d VR128:$src1,
(bc_v4i32 (loadv2i64 addr:$src2))))]>;
"psllq {$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (int_x86_sse2_psll_q VR128:$src1,
VR128:$src2))]>;
-def PSLLQrm : PDIi8<0xF3, MRMSrcReg, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
+def PSLLQrm : PDIi8<0xF3, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
"psllq {$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (int_x86_sse2_psll_q VR128:$src1,
(bc_v4i32 (loadv2i64 addr:$src2))))]>;
"psrlw {$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (int_x86_sse2_psrl_w VR128:$src1,
VR128:$src2))]>;
-def PSRLWrm : PDIi8<0xD1, MRMSrcReg, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
+def PSRLWrm : PDIi8<0xD1, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
"psrlw {$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (int_x86_sse2_psrl_w VR128:$src1,
(bc_v4i32 (loadv2i64 addr:$src2))))]>;
"psrld {$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (int_x86_sse2_psrl_d VR128:$src1,
VR128:$src2))]>;
-def PSRLDrm : PDIi8<0xD2, MRMSrcReg, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
+def PSRLDrm : PDIi8<0xD2, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
"psrld {$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (int_x86_sse2_psrl_d VR128:$src1,
(bc_v4i32 (loadv2i64 addr:$src2))))]>;
"psrlq {$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (int_x86_sse2_psrl_q VR128:$src1,
VR128:$src2))]>;
-def PSRLQrm : PDIi8<0xD3, MRMSrcReg, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
+def PSRLQrm : PDIi8<0xD3, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
"psrlq {$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (int_x86_sse2_psrl_q VR128:$src1,
(bc_v4i32 (loadv2i64 addr:$src2))))]>;
"psraw {$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (int_x86_sse2_psra_w VR128:$src1,
VR128:$src2))]>;
-def PSRAWrm : PDIi8<0xE1, MRMSrcReg, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
+def PSRAWrm : PDIi8<0xE1, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
"psraw {$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (int_x86_sse2_psra_w VR128:$src1,
(bc_v4i32 (loadv2i64 addr:$src2))))]>;
"psrad {$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (int_x86_sse2_psra_d VR128:$src1,
VR128:$src2))]>;
-def PSRADrm : PDIi8<0xE2, MRMSrcReg, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
+def PSRADrm : PDIi8<0xE2, MRMSrcMem, (ops VR128:$dst, VR128:$src1, i128mem:$src2),
"psrad {$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (int_x86_sse2_psra_d VR128:$src1,
(bc_v4i32 (loadv2i64 addr:$src2))))]>;
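// For reference, the shift count for these register forms comes from
// the low 64 bits of the second XMM operand. A minimal C sketch:
//
//   #include <emmintrin.h>
//   __m128i shifts(__m128i v, __m128i cnt) {
//     __m128i a = _mm_sll_epi32(v, cnt);   /* pslld */
//     __m128i l = _mm_srl_epi32(v, cnt);   /* psrld */
//     __m128i r = _mm_sra_epi32(v, cnt);   /* psrad */
//     return _mm_or_si128(_mm_or_si128(a, l), r);
//   }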
"pcmpeqb {$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (int_x86_sse2_pcmpeq_b VR128:$src1,
VR128:$src2))]>;
-def PCMPEQBrm : PDI<0x74, MRMSrcReg,
+def PCMPEQBrm : PDI<0x74, MRMSrcMem,
(ops VR128:$dst, VR128:$src1, i128mem:$src2),
"pcmpeqb {$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (int_x86_sse2_pcmpeq_b VR128:$src1,
"pcmpeqw {$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (int_x86_sse2_pcmpeq_w VR128:$src1,
VR128:$src2))]>;
-def PCMPEQWrm : PDI<0x75, MRMSrcReg,
+def PCMPEQWrm : PDI<0x75, MRMSrcMem,
(ops VR128:$dst, VR128:$src1, i128mem:$src2),
"pcmpeqw {$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (int_x86_sse2_pcmpeq_w VR128:$src1,
"pcmpeqd {$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (int_x86_sse2_pcmpeq_d VR128:$src1,
VR128:$src2))]>;
-def PCMPEQDrm : PDI<0x76, MRMSrcReg,
+def PCMPEQDrm : PDI<0x76, MRMSrcMem,
(ops VR128:$dst, VR128:$src1, i128mem:$src2),
"pcmpeqd {$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (int_x86_sse2_pcmpeq_d VR128:$src1,
"pcmpgtb {$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (int_x86_sse2_pcmpgt_b VR128:$src1,
VR128:$src2))]>;
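// For reference, a minimal C sketch of the compare semantics: each
// element becomes all-ones when the predicate holds, all-zeros
// otherwise:
//
//   #include <emmintrin.h>
//   __m128i gt_mask(__m128i a, __m128i b) {
//     return _mm_cmpgt_epi8(a, b);   /* pcmpgtb: signed per-byte compare */
//   }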
-def PCMPGTBrm : PDI<0x64, MRMSrcReg,
+def PCMPGTBrm : PDI<0x64, MRMSrcMem,
(ops VR128:$dst, VR128:$src1, i128mem:$src2),
"pcmpgtb {$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (int_x86_sse2_pcmpgt_b VR128:$src1,
"pcmpgtw {$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (int_x86_sse2_pcmpgt_w VR128:$src1,
VR128:$src2))]>;
-def PCMPGTWrm : PDI<0x65, MRMSrcReg,
+def PCMPGTWrm : PDI<0x65, MRMSrcMem,
(ops VR128:$dst, VR128:$src1, i128mem:$src2),
"pcmpgtw {$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (int_x86_sse2_pcmpgt_w VR128:$src1,
"pcmpgtd {$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (int_x86_sse2_pcmpgt_d VR128:$src1,
VR128:$src2))]>;
-def PCMPGTDrm : PDI<0x66, MRMSrcReg,
+def PCMPGTDrm : PDI<0x66, MRMSrcMem,
(ops VR128:$dst, VR128:$src1, i128mem:$src2),
"pcmpgtd {$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (int_x86_sse2_pcmpgt_d VR128:$src1,
[(set VR128:$dst, (v4i32 (int_x86_sse2_packssdw_128
VR128:$src1,
VR128:$src2)))]>;
-def PACKSSDWrm : PDI<0x6B, MRMSrcReg, (ops VR128:$dst, VR128:$src1,
+def PACKSSDWrm : PDI<0x6B, MRMSrcMem, (ops VR128:$dst, VR128:$src1,
i128mem:$src2),
"packssdw {$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (v4i32 (int_x86_sse2_packssdw_128
[(set VR128:$dst, (v8i16 (int_x86_sse2_packuswb_128
VR128:$src1,
VR128:$src2)))]>;
-def PACKUSWBrm : PDI<0x67, MRMSrcReg, (ops VR128:$dst, VR128:$src1,
+def PACKUSWBrm : PDI<0x67, MRMSrcMem, (ops VR128:$dst, VR128:$src1,
i128mem:$src2),
"packuswb {$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (v8i16 (int_x86_sse2_packuswb_128
UNPCKH_shuffle_mask)))]>;
def PUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
(ops VR128:$dst, VR128:$src1, VR128:$src2),
- "punpckhdq {$src2, $dst|$dst, $src2}",
+ "punpckhqdq {$src2, $dst|$dst, $src2}",
[(set VR128:$dst,
(v2i64 (vector_shuffle VR128:$src1, VR128:$src2,
UNPCKH_shuffle_mask)))]>;
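// For reference, a minimal C sketch of the qword unpacks (helper name
// illustrative):
//
//   #include <emmintrin.h>
//   __m128i unpacks(__m128i a, __m128i b) {
//     __m128i hi = _mm_unpackhi_epi64(a, b);   /* punpckhqdq: <a1, b1> */
//     __m128i lo = _mm_unpacklo_epi64(a, b);   /* punpcklqdq: <a0, b0> */
//     return _mm_xor_si128(hi, lo);
//   }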
"pextrw {$src2, $src1, $dst|$dst, $src1, $src2}",
[(set R32:$dst, (X86pextrw (v8i16 VR128:$src1),
(i32 imm:$src2)))]>;
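// For reference, pextrw zero-extends the selected word into a 32-bit
// GPR. A minimal C sketch:
//
//   #include <emmintrin.h>
//   int third_word(__m128i v) {
//     return _mm_extract_epi16(v, 3);   /* pextrw $3: result is 0..65535 */
//   }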
-def PEXTRWmi : PDIi8<0xC5, MRMSrcMem,
- (ops R32:$dst, i128mem:$src1, i32i8imm:$src2),
- "pextrw {$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set R32:$dst, (X86pextrw
- (bc_v8i16 (loadv2i64 addr:$src1)),
- (i32 imm:$src2)))]>;
-
let isTwoAddress = 1 in {
def PINSRWrri : PDIi8<0xC4, MRMSrcReg,
(ops VR128:$dst, VR128:$src1, R32:$src2, i32i8imm:$src3),
"movsd {$src, $dst|$dst, $src}",
[(set FR64:$dst, (vector_extract (v2f64 VR128:$src),
(i32 0)))]>;
-def MOVPDI2DIrr : PDI<0x6E, MRMSrcReg, (ops R32:$dst, VR128:$src),
+def MOVPD2SDmr : SDI<0x11, MRMDestMem, (ops f64mem:$dst, VR128:$src),
+ "movsd {$src, $dst|$dst, $src}",
+ [(store (f64 (vector_extract (v2f64 VR128:$src),
+ (i32 0))), addr:$dst)]>;
+def MOVPDI2DIrr : PDI<0x7E, MRMDestReg, (ops R32:$dst, VR128:$src),
"movd {$src, $dst|$dst, $src}",
[(set R32:$dst, (vector_extract (v4i32 VR128:$src),
(i32 0)))]>;
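// For reference, a minimal C sketch of the low-element moves above
// (helper name illustrative):
//
//   #include <emmintrin.h>
//   int low_elts(__m128d d, __m128i v, double *p) {
//     _mm_store_sd(p, d);            /* movsd to memory (MOVPD2SDmr)  */
//     return _mm_cvtsi128_si32(v);   /* movd xmm -> r32 (MOVPDI2DIrr) */
//   }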
"movss {$src2, $dst|$dst, $src2}", []>;
def MOVLSD2PDrr : SDI<0x10, MRMSrcReg, (ops VR128:$dst, VR128:$src1, FR64:$src2),
"movsd {$src2, $dst|$dst, $src2}", []>;
-def MOVLDI2PDIrr : PDI<0x6E, MRMSrcReg, (ops VR128:$dst, VR128:$src1, R32:$src2),
- "movd {$src2, $dst|$dst, $src2}", []>;
+let AddedComplexity = 20 in {
def MOVLPSrr : SSI<0x10, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
"movss {$src2, $dst|$dst, $src2}",
[(set VR128:$dst,
(v4f32 (vector_shuffle VR128:$src1, VR128:$src2,
- MOVS_shuffle_mask)))]>;
+ MOVL_shuffle_mask)))]>;
def MOVLPDrr : SDI<0x10, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
"movsd {$src2, $dst|$dst, $src2}",
[(set VR128:$dst,
(v2f64 (vector_shuffle VR128:$src1, VR128:$src2,
- MOVS_shuffle_mask)))]>;
+ MOVL_shuffle_mask)))]>;
+}
}
// Store / copy the lower 64 bits of an XMM register.
"movq {$src, $dst|$dst, $src}",
[(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>;
-// FIXME: Temporary workaround since 2-wide shuffle is broken.
-def MOVLQ128rr : PDI<0xD6, MRMSrcReg, (ops VR128:$dst, VR128:$src),
- "movq {$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_movl_dq VR128:$src))]>;
-
// Move to the lower bits of a VR128, zeroing the upper bits.
// Loads from memory automatically zero the upper bits.
+let AddedComplexity = 20 in {
def MOVZSS2PSrm : SSI<0x10, MRMSrcMem, (ops VR128:$dst, f32mem:$src),
"movss {$src, $dst|$dst, $src}",
- [(set VR128:$dst,
- (v4f32 (X86zexts2vec (loadf32 addr:$src))))]>;
+ [(set VR128:$dst, (v4f32 (vector_shuffle immAllZerosV,
+ (v4f32 (scalar_to_vector (loadf32 addr:$src))),
+ MOVL_shuffle_mask)))]>;
def MOVZSD2PDrm : SDI<0x10, MRMSrcMem, (ops VR128:$dst, f64mem:$src),
"movsd {$src, $dst|$dst, $src}",
- [(set VR128:$dst,
- (v2f64 (X86zexts2vec (loadf64 addr:$src))))]>;
+ [(set VR128:$dst, (v2f64 (vector_shuffle immAllZerosV,
+ (v2f64 (scalar_to_vector (loadf64 addr:$src))),
+ MOVL_shuffle_mask)))]>;
+// movd / movq to an XMM register zero-extends.
+def MOVZDI2PDIrr : PDI<0x6E, MRMSrcReg, (ops VR128:$dst, R32:$src),
+ "movd {$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (v4i32 (vector_shuffle immAllZerosV,
+ (v4i32 (scalar_to_vector R32:$src)),
+ MOVL_shuffle_mask)))]>;
def MOVZDI2PDIrm : PDI<0x6E, MRMSrcMem, (ops VR128:$dst, i32mem:$src),
"movd {$src, $dst|$dst, $src}",
- [(set VR128:$dst,
- (v4i32 (X86zexts2vec (loadi32 addr:$src))))]>;
-def MOVZQI2PQIrm : PDI<0x7E, MRMSrcMem, (ops VR128:$dst, i64mem:$src),
- "movq {$src, $dst|$dst, $src}",
- [(set VR128:$dst,
- (bc_v2i64 (v2f64 (X86zexts2vec
- (loadf64 addr:$src)))))]>;
+ [(set VR128:$dst, (v4i32 (vector_shuffle immAllZerosV,
+ (v4i32 (scalar_to_vector (loadi32 addr:$src))),
+ MOVL_shuffle_mask)))]>;
+// Move from XMM to XMM, clearing the upper 64 bits.
+def MOVZQI2PQIrr : I<0x7E, MRMSrcReg, (ops VR128:$dst, VR128:$src),
+ "movq {$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_movl_dq VR128:$src))]>,
+ XS, Requires<[HasSSE2]>;
+def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (ops VR128:$dst, i64mem:$src),
+ "movq {$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_movl_dq
+ (bc_v4i32 (loadv2i64 addr:$src))))]>,
+ XS, Requires<[HasSSE2]>;
+}
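// For reference, a minimal C sketch of the integer zero-extending
// moves; _mm_load_ss / _mm_load_sd zero the upper lanes the same way
// (MOVZSS2PSrm / MOVZSD2PDrm):
//
//   #include <emmintrin.h>
//   __m128i zext_moves(int s, const __m128i *p, __m128i v) {
//     __m128i a = _mm_cvtsi32_si128(s);   /* movd r32 -> xmm, upper zeroed */
//     __m128i b = _mm_loadl_epi64(p);     /* movq m64 -> xmm, upper zeroed */
//     __m128i c = _mm_move_epi64(v);      /* movq xmm -> xmm, upper zeroed */
//     return _mm_or_si128(_mm_or_si128(a, b), c);
//   }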
//===----------------------------------------------------------------------===//
// Non-Instruction Patterns
// Scalar to v8i16 / v16i8. The source may be an R32, but only the lower 8 or
// 16 bits matter.
-def : Pat<(v8i16 (X86s2vec R32:$src)), (MOVDI2PDIrr R32:$src)>,
+def : Pat<(v8i16 (X86s2vec R32:$src)), (v8i16 (MOVDI2PDIrr R32:$src))>,
Requires<[HasSSE2]>;
-def : Pat<(v16i8 (X86s2vec R32:$src)), (MOVDI2PDIrr R32:$src)>,
+def : Pat<(v16i8 (X86s2vec R32:$src)), (v16i8 (MOVDI2PDIrr R32:$src))>,
Requires<[HasSSE2]>;
// bit_convert
def : Pat<(v2f64 (bitconvert (v4f32 VR128:$src))), (v2f64 VR128:$src)>,
Requires<[HasSSE2]>;
-// Zeroing a VR128 then do a MOVS* to the lower bits.
-def : Pat<(v2f64 (X86zexts2vec FR64:$src)),
- (MOVLSD2PDrr (V_SET0_PD), FR64:$src)>, Requires<[HasSSE2]>;
-def : Pat<(v4f32 (X86zexts2vec FR32:$src)),
- (MOVLSS2PSrr (V_SET0_PS), FR32:$src)>, Requires<[HasSSE2]>;
-def : Pat<(v4i32 (X86zexts2vec R32:$src)),
- (MOVLDI2PDIrr (V_SET0_PI), R32:$src)>, Requires<[HasSSE2]>;
-def : Pat<(v8i16 (X86zexts2vec R16:$src)),
- (MOVLDI2PDIrr (V_SET0_PI), (MOVZX32rr16 R16:$src))>, Requires<[HasSSE2]>;
-def : Pat<(v16i8 (X86zexts2vec R8:$src)),
- (MOVLDI2PDIrr (V_SET0_PI), (MOVZX32rr8 R8:$src))>, Requires<[HasSSE2]>;
+// Move a scalar to XMM, zero-extending into the upper elements.
+// movd to an XMM register zero-extends.
+let AddedComplexity = 20 in {
+def : Pat<(v8i16 (vector_shuffle immAllZerosV,
+ (v8i16 (X86s2vec R32:$src)), MOVL_shuffle_mask)),
+ (v8i16 (MOVZDI2PDIrr R32:$src))>, Requires<[HasSSE2]>;
+def : Pat<(v16i8 (vector_shuffle immAllZerosV,
+ (v16i8 (X86s2vec R32:$src)), MOVL_shuffle_mask)),
+ (v16i8 (MOVZDI2PDIrr R32:$src))>, Requires<[HasSSE2]>;
+// Zero a VR128, then MOVS{S|D} into the lower bits.
+def : Pat<(v2f64 (vector_shuffle immAllZerosV,
+ (v2f64 (scalar_to_vector FR64:$src)), MOVL_shuffle_mask)),
+ (v2f64 (MOVLSD2PDrr (V_SET0_PD), FR64:$src))>, Requires<[HasSSE2]>;
+def : Pat<(v4f32 (vector_shuffle immAllZerosV,
+ (v4f32 (scalar_to_vector FR32:$src)), MOVL_shuffle_mask)),
+ (v4f32 (MOVLSS2PSrr (V_SET0_PS), FR32:$src))>, Requires<[HasSSE2]>;
+}
// Splat v2f64 / v2i64
+let AddedComplexity = 10 in {
def : Pat<(vector_shuffle (v2f64 VR128:$src), (undef), SSE_splat_v2_mask:$sm),
(v2f64 (UNPCKLPDrr VR128:$src, VR128:$src))>, Requires<[HasSSE2]>;
def : Pat<(vector_shuffle (v2i64 VR128:$src), (undef), SSE_splat_v2_mask:$sm),
(v2i64 (PUNPCKLQDQrr VR128:$src, VR128:$src))>, Requires<[HasSSE2]>;
+}
// Splat v4f32
def : Pat<(vector_shuffle (v4f32 VR128:$src), (undef), SSE_splat_mask:$sm),
- (v4f32 (SHUFPSrr VR128:$src, VR128:$src, SSE_splat_mask:$sm))>,
+ (v4f32 (SHUFPSrri VR128:$src, VR128:$src, SSE_splat_mask:$sm))>,
Requires<[HasSSE1]>;
-// Special unary SHUFPSrr case.
+// Special unary SHUFPSrri case.
// FIXME: when we want non-two-address code, we should use PSHUFD?
def : Pat<(vector_shuffle (v4f32 VR128:$src1), (undef),
SHUFP_unary_shuffle_mask:$sm),
- (v4f32 (SHUFPSrr VR128:$src1, VR128:$src1, SHUFP_unary_shuffle_mask:$sm))>,
+ (v4f32 (SHUFPSrri VR128:$src1, VR128:$src1, SHUFP_unary_shuffle_mask:$sm))>,
Requires<[HasSSE1]>;
// Unary v4f32 shuffle with PSHUF* in order to fold a load.
def : Pat<(vector_shuffle (loadv4f32 addr:$src1), (undef),
// Special binary v4i32 shuffle cases with SHUFPS.
def : Pat<(vector_shuffle (v4i32 VR128:$src1), (v4i32 VR128:$src2),
PSHUFD_binary_shuffle_mask:$sm),
- (v4i32 (SHUFPSrr VR128:$src1, VR128:$src2,
+ (v4i32 (SHUFPSrri VR128:$src1, VR128:$src2,
PSHUFD_binary_shuffle_mask:$sm))>, Requires<[HasSSE2]>;
def : Pat<(vector_shuffle (v4i32 VR128:$src1),
(bc_v4i32 (loadv2i64 addr:$src2)), PSHUFD_binary_shuffle_mask:$sm),
- (v4i32 (SHUFPSrm VR128:$src1, addr:$src2,
+ (v4i32 (SHUFPSrmi VR128:$src1, addr:$src2,
PSHUFD_binary_shuffle_mask:$sm))>, Requires<[HasSSE2]>;
// vector_shuffle v1, <undef>, <0, 0, 1, 1, ...>
+let AddedComplexity = 10 in {
def : Pat<(v4f32 (vector_shuffle VR128:$src, (undef),
UNPCKL_v_undef_shuffle_mask)),
- (UNPCKLPSrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
+ (v4f32 (UNPCKLPSrr VR128:$src, VR128:$src))>, Requires<[HasSSE2]>;
def : Pat<(v16i8 (vector_shuffle VR128:$src, (undef),
UNPCKL_v_undef_shuffle_mask)),
- (PUNPCKLBWrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
+ (v16i8 (PUNPCKLBWrr VR128:$src, VR128:$src))>, Requires<[HasSSE2]>;
def : Pat<(v8i16 (vector_shuffle VR128:$src, (undef),
UNPCKL_v_undef_shuffle_mask)),
- (PUNPCKLWDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
+ (v8i16 (PUNPCKLWDrr VR128:$src, VR128:$src))>, Requires<[HasSSE2]>;
def : Pat<(v4i32 (vector_shuffle VR128:$src, (undef),
UNPCKL_v_undef_shuffle_mask)),
- (PUNPCKLDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE1]>;
+ (v4i32 (PUNPCKLDQrr VR128:$src, VR128:$src))>, Requires<[HasSSE1]>;
+}
+let AddedComplexity = 20 in {
// vector_shuffle v1, <undef>, <1, 1, 3, 3>
def : Pat<(v4i32 (vector_shuffle VR128:$src, (undef),
MOVSHDUP_shuffle_mask)),
- (MOVSHDUPrr VR128:$src)>, Requires<[HasSSE3]>;
+ (v4i32 (MOVSHDUPrr VR128:$src))>, Requires<[HasSSE3]>;
def : Pat<(v4i32 (vector_shuffle (bc_v4i32 (loadv2i64 addr:$src)), (undef),
MOVSHDUP_shuffle_mask)),
- (MOVSHDUPrm addr:$src)>, Requires<[HasSSE3]>;
+ (v4i32 (MOVSHDUPrm addr:$src))>, Requires<[HasSSE3]>;
// vector_shuffle v1, <undef>, <0, 0, 2, 2>
def : Pat<(v4i32 (vector_shuffle VR128:$src, (undef),
MOVSLDUP_shuffle_mask)),
- (MOVSLDUPrr VR128:$src)>, Requires<[HasSSE3]>;
+ (v4i32 (MOVSLDUPrr VR128:$src))>, Requires<[HasSSE3]>;
def : Pat<(v4i32 (vector_shuffle (bc_v4i32 (loadv2i64 addr:$src)), (undef),
MOVSLDUP_shuffle_mask)),
- (MOVSLDUPrm addr:$src)>, Requires<[HasSSE3]>;
+ (v4i32 (MOVSLDUPrm addr:$src))>, Requires<[HasSSE3]>;
+}
+
+let AddedComplexity = 20 in {
+// vector_shuffle v1, v2 <0, 1, 4, 5> using MOVLHPS
+def : Pat<(v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
+ MOVHP_shuffle_mask)),
+ (v4i32 (MOVLHPSrr VR128:$src1, VR128:$src2))>;
+
+// vector_shuffle v1, v2 <6, 7, 2, 3> using MOVHLPS
+def : Pat<(v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
+ MOVHLPS_shuffle_mask)),
+ (v4i32 (MOVHLPSrr VR128:$src1, VR128:$src2))>;
+
+// vector_shuffle v1, (load v2) <4, 5, 2, 3> using MOVLPS
+// vector_shuffle v1, (load v2) <0, 1, 4, 5> using MOVHPS
+def : Pat<(v4f32 (vector_shuffle VR128:$src1, (loadv4f32 addr:$src2),
+ MOVLP_shuffle_mask)),
+ (v4f32 (MOVLPSrm VR128:$src1, addr:$src2))>, Requires<[HasSSE1]>;
+def : Pat<(v2f64 (vector_shuffle VR128:$src1, (loadv2f64 addr:$src2),
+ MOVLP_shuffle_mask)),
+ (v2f64 (MOVLPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
+def : Pat<(v4f32 (vector_shuffle VR128:$src1, (loadv4f32 addr:$src2),
+ MOVHP_shuffle_mask)),
+ (v4f32 (MOVHPSrm VR128:$src1, addr:$src2))>, Requires<[HasSSE1]>;
+def : Pat<(v2f64 (vector_shuffle VR128:$src1, (loadv2f64 addr:$src2),
+ MOVHP_shuffle_mask)),
+ (v2f64 (MOVHPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
+
+def : Pat<(v4i32 (vector_shuffle VR128:$src1, (bc_v4i32 (loadv2i64 addr:$src2)),
+ MOVLP_shuffle_mask)),
+ (v4i32 (MOVLPSrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
+def : Pat<(v2i64 (vector_shuffle VR128:$src1, (loadv2i64 addr:$src2),
+ MOVLP_shuffle_mask)),
+ (v2i64 (MOVLPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
+def : Pat<(v4i32 (vector_shuffle VR128:$src1, (bc_v4i32 (loadv2i64 addr:$src2)),
+ MOVHP_shuffle_mask)),
+ (v4i32 (MOVHPSrm VR128:$src1, addr:$src2))>, Requires<[HasSSE1]>;
+def : Pat<(v2i64 (vector_shuffle VR128:$src1, (loadv2i64 addr:$src2),
+                  MOVHP_shuffle_mask)),
+          (v2i64 (MOVHPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
+
+// Setting the lowest element in the vector.
+def : Pat<(v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
+ MOVL_shuffle_mask)),
+ (v4i32 (MOVLPSrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
+def : Pat<(v2i64 (vector_shuffle VR128:$src1, VR128:$src2,
+ MOVL_shuffle_mask)),
+ (v2i64 (MOVLPDrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
+
+// vector_shuffle v1, v2 <4, 5, 2, 3> using MOVLPDrr (movsd)
+def : Pat<(v4f32 (vector_shuffle VR128:$src1, VR128:$src2,
+ MOVLP_shuffle_mask)),
+ (v4f32 (MOVLPDrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
+def : Pat<(v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
+ MOVLP_shuffle_mask)),
+ (v4i32 (MOVLPDrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
+
+// Set lowest element and zero upper elements.
+def : Pat<(bc_v2i64 (vector_shuffle immAllZerosV,
+ (v2f64 (scalar_to_vector (loadf64 addr:$src))),
+ MOVL_shuffle_mask)),
+ (v2i64 (MOVZQI2PQIrm addr:$src))>, Requires<[HasSSE2]>;
+}
+
+// FIXME: Temporary workaround since 2-wide shuffle is broken.
+def : Pat<(int_x86_sse2_movs_d VR128:$src1, VR128:$src2),
+ (v2f64 (MOVLPDrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
+def : Pat<(int_x86_sse2_loadh_pd VR128:$src1, addr:$src2),
+ (v2f64 (MOVHPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
+def : Pat<(int_x86_sse2_loadl_pd VR128:$src1, addr:$src2),
+ (v2f64 (MOVLPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
+def : Pat<(int_x86_sse2_shuf_pd VR128:$src1, VR128:$src2, imm:$src3),
+ (v2f64 (SHUFPDrri VR128:$src1, VR128:$src2, imm:$src3))>,
+ Requires<[HasSSE2]>;
+def : Pat<(int_x86_sse2_shuf_pd VR128:$src1, (load addr:$src2), imm:$src3),
+ (v2f64 (SHUFPDrmi VR128:$src1, addr:$src2, imm:$src3))>,
+ Requires<[HasSSE2]>;
+def : Pat<(int_x86_sse2_unpckh_pd VR128:$src1, VR128:$src2),
+ (v2f64 (UNPCKHPDrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
+def : Pat<(int_x86_sse2_unpckh_pd VR128:$src1, (load addr:$src2)),
+ (v2f64 (UNPCKHPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
+def : Pat<(int_x86_sse2_unpckl_pd VR128:$src1, VR128:$src2),
+ (v2f64 (UNPCKLPDrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
+def : Pat<(int_x86_sse2_unpckl_pd VR128:$src1, (load addr:$src2)),
+ (v2f64 (UNPCKLPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
+def : Pat<(int_x86_sse2_punpckh_qdq VR128:$src1, VR128:$src2),
+ (v2i64 (PUNPCKHQDQrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
+def : Pat<(int_x86_sse2_punpckh_qdq VR128:$src1, (load addr:$src2)),
+ (v2i64 (PUNPCKHQDQrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
+def : Pat<(int_x86_sse2_punpckl_qdq VR128:$src1, VR128:$src2),
+ (v2i64 (PUNPCKLQDQrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
+def : Pat<(int_x86_sse2_punpckl_qdq VR128:$src1, (load addr:$src2)),
+          (v2i64 (PUNPCKLQDQrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
// 128-bit logical shifts
def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
// Some special case pandn patterns.
def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v4i32 immAllOnesV))),
VR128:$src2)),
- (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
+ (v2i64 (PANDNrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v8i16 immAllOnesV))),
VR128:$src2)),
- (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
+ (v2i64 (PANDNrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v16i8 immAllOnesV))),
VR128:$src2)),
- (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
+ (v2i64 (PANDNrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v4i32 immAllOnesV))),
(load addr:$src2))),
- (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
+ (v2i64 (PANDNrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v8i16 immAllOnesV))),
(load addr:$src2))),
- (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
+ (v2i64 (PANDNrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v16i8 immAllOnesV))),
(load addr:$src2))),
- (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
+ (v2i64 (PANDNrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
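// For reference, pandn complements its first operand, which is why the
// xor-with-all-ones must be on $src1 in the patterns above. A minimal
// C sketch:
//
//   #include <emmintrin.h>
//   __m128i andnot(__m128i a, __m128i b) {
//     return _mm_andnot_si128(a, b);   /* pandn: (~a) & b */
//   }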