//===----------------------------------------------------------------------===//
// Data Transfer Instructions
// MOVD (opcode 0x6E/0x7E) between 32-bit GPRs / i32 memory and MMX (VR64)
// registers. This hunk gives the rr and rm forms real selection patterns —
// (v2i32 (scalar_to_vector ...)) — in place of the previous empty pattern
// lists, and trims the flag set on the load form.
-let neverHasSideEffects = 1 in
def MMX_MOVD64rr : MMXI<0x6E, MRMSrcReg, (outs VR64:$dst), (ins GR32:$src),
- "movd\t{$src, $dst|$dst, $src}", []>;
-let isSimpleLoad = 1, mayLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in
+ "movd\t{$src, $dst|$dst, $src}",
+ [(set VR64:$dst, (v2i32 (scalar_to_vector GR32:$src)))]>;
// Load form: a rematerializable simple load of an i32 placed into the low
// element of a v2i32. The old mayLoad/mayHaveSideEffects flags are dropped
// here; presumably the pattern now implies the load — confirm against the
// TableGen flag-inference rules in use at this revision.
+let isSimpleLoad = 1, isReMaterializable = 1 in
def MMX_MOVD64rm : MMXI<0x6E, MRMSrcMem, (outs VR64:$dst), (ins i32mem:$src),
- "movd\t{$src, $dst|$dst, $src}", []>;
+ "movd\t{$src, $dst|$dst, $src}",
+ [(set VR64:$dst, (v2i32 (scalar_to_vector (loadi32 addr:$src))))]>;
// Store form (0x7E, MRMDestMem): unchanged, still pattern-less; only the
// mayStore flag describes its effect.
let mayStore = 1 in
def MMX_MOVD64mr : MMXI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR64:$src),
 "movd\t{$src, $dst|$dst, $src}", []>;
// Reinterpreting a 64-bit GPR as a v8i8 MMX value is just a GR64->VR64 move
// (MMX_MOVD64to64rr is defined elsewhere in this file).
def : Pat<(v8i8 (bitconvert (i64 GR64:$src))),
 (MMX_MOVD64to64rr GR64:$src)>;
// The custom MMX_X86s2vec SDNode is removed by this hunk; the zero-extending
// movd patterns below are retargeted to generic scalar_to_vector wrapped in
// bc_v8i8 / bc_v4i16 bitcasts, so they still select MMX_MOVZDI2PDIrr.
-def MMX_X86s2vec : SDNode<"X86ISD::S2VEC", SDTypeProfile<1, 1, []>, []>;
-
// Move scalar to XMM zero-extended
// movd to XMM register zero-extends
// AddedComplexity = 15 prefers these shuffle-with-zeros patterns over the
// plain scalar_to_vector patterns further down.
let AddedComplexity = 15 in {
  def : Pat<(v8i8 (vector_shuffle immAllZerosV_bc,
- (v8i8 (MMX_X86s2vec GR32:$src)), MMX_MOVL_shuffle_mask)),
+ (bc_v8i8 (v2i32 (scalar_to_vector GR32:$src))),
+ MMX_MOVL_shuffle_mask)),
 (MMX_MOVZDI2PDIrr GR32:$src)>;
  def : Pat<(v4i16 (vector_shuffle immAllZerosV_bc,
- (v4i16 (MMX_X86s2vec GR32:$src)), MMX_MOVL_shuffle_mask)),
- (MMX_MOVZDI2PDIrr GR32:$src)>;
// NOTE(review): the v2i32 variant of this pattern is deleted outright rather
// than rewritten — verify the v2i32 case is covered elsewhere (e.g. by the
// MMX_MOVD64rr pattern itself).
- def : Pat<(v2i32 (vector_shuffle immAllZerosV,
- (v2i32 (MMX_X86s2vec GR32:$src)), MMX_MOVL_shuffle_mask)),
+ (bc_v4i16 (v2i32 (scalar_to_vector GR32:$src))),
+ MMX_MOVL_shuffle_mask)),
 (MMX_MOVZDI2PDIrr GR32:$src)>;
}
-// Scalar to v2i32 / v4i16 / v8i8. The source may be a GR32, but only the lower
+// Scalar to v4i16 / v8i8. The source may be a GR32, but only the lower
// 8 or 16-bits matter.
// With MMX_X86s2vec gone, the v8i8/v4i16 cases are expressed as bitcasts of
// a v2i32 scalar_to_vector; the explicit v2i32 pattern is dropped here
// (the MMX_MOVD64rr def above now carries that pattern directly).
-def : Pat<(v8i8 (MMX_X86s2vec GR32:$src)), (MMX_MOVD64rr GR32:$src)>;
-def : Pat<(v4i16 (MMX_X86s2vec GR32:$src)), (MMX_MOVD64rr GR32:$src)>;
-def : Pat<(v2i32 (MMX_X86s2vec GR32:$src)), (MMX_MOVD64rr GR32:$src)>;
+def : Pat<(bc_v8i8 (v2i32 (scalar_to_vector GR32:$src))),
+ (MMX_MOVD64rr GR32:$src)>;
+def : Pat<(bc_v4i16 (v2i32 (scalar_to_vector GR32:$src))),
+ (MMX_MOVD64rr GR32:$src)>;
// Patterns to perform canonical versions of vector shuffling.
let AddedComplexity = 10 in {
def X86fsrl : SDNode<"X86ISD::FSRL", SDTX86FPShiftOp>;
def X86comi : SDNode<"X86ISD::COMI", SDTX86CmpTest>;
def X86ucomi : SDNode<"X86ISD::UCOMI", SDTX86CmpTest>;
-def X86s2vec : SDNode<"X86ISD::S2VEC", SDTypeProfile<1, 1, []>, []>;
def X86pextrb : SDNode<"X86ISD::PEXTRB",
SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
def X86pextrw : SDNode<"X86ISD::PEXTRW",
(bitconvert (memopv2i64 addr:$src2))))]>;
}
// Removed: PDI_binop_rmi_int bundled the rr/rm intrinsic forms with an
// immediate (ri) form whose pattern fed a bare (scalar_to_vector (i32 imm))
// to the intrinsic regardless of the intrinsic's element type. The ri forms
// are re-emitted individually below with explicit bitcasts to the vector
// type each intrinsic expects; the rr/rm forms move to PDI_binop_rm_int.
-multiclass PDI_binop_rmi_int<bits<8> opc, bits<8> opc2, Format ImmForm,
- string OpcodeStr, Intrinsic IntId> {
- def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
- def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (IntId VR128:$src1,
- (bitconvert (memopv2i64 addr:$src2))))]>;
- def ri : PDIi8<opc2, ImmForm, (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (IntId VR128:$src1,
- (scalar_to_vector (i32 imm:$src2))))]>;
-}
-
-
/// PDI_binop_rm - Simple SSE2 binary operator.
multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
ValueType OpVT, bit Commutable = 0> {
defm PSADBW : PDI_binop_rm_int<0xE0, "psadbw", int_x86_sse2_psad_bw, 1>;
// Shift-by-register (xmm/m128 count) forms: this hunk switches them from the
// removed PDI_binop_rmi_int multiclass to PDI_binop_rm_int, which emits only
// the rr/rm variants. The shift-by-immediate variants are defined
// individually below.
-defm PSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "psllw", int_x86_sse2_psll_w>;
-defm PSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "pslld", int_x86_sse2_psll_d>;
-defm PSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "psllq", int_x86_sse2_psll_q>;
-
-defm PSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "psrlw", int_x86_sse2_psrl_w>;
-defm PSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "psrld", int_x86_sse2_psrl_d>;
-defm PSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "psrlq", int_x86_sse2_psrl_q>;
+defm PSLLW : PDI_binop_rm_int<0xF1, "psllw", int_x86_sse2_psll_w>;
+defm PSLLD : PDI_binop_rm_int<0xF2, "pslld", int_x86_sse2_psll_d>;
+defm PSLLQ : PDI_binop_rm_int<0xF3, "psllq", int_x86_sse2_psll_q>;
+
+defm PSRLW : PDI_binop_rm_int<0xD1, "psrlw", int_x86_sse2_psrl_w>;
+defm PSRLD : PDI_binop_rm_int<0xD2, "psrld", int_x86_sse2_psrl_d>;
+defm PSRLQ : PDI_binop_rm_int<0xD3, "psrlq", int_x86_sse2_psrl_q>;
+
+defm PSRAW : PDI_binop_rm_int<0xE1, "psraw", int_x86_sse2_psra_w>;
+defm PSRAD : PDI_binop_rm_int<0xE2, "psrad", int_x86_sse2_psra_d>;
+// Some immediate variants need to match a bit_convert.
+// Shift-by-immediate forms, written out individually (instead of via the
+// removed PDI_binop_rmi_int) so that the word and quadword intrinsics can
+// wrap the immediate's (v4i32 scalar_to_vector) in the bc_v8i16 / bc_v2i64
+// bitcast they expect; the dword intrinsics take it directly. Within the
+// shared 0x71/0x72/0x73 immediate opcodes, MRM6r selects the PSLL* forms,
+// MRM2r the PSRL* forms, and MRM4r the PSRA* forms.
+def PSLLWri : PDIi8<0x71, MRM6r, (outs VR128:$dst),
+ (ins VR128:$src1, i32i8imm:$src2),
+ "psllw\t{$src2, $dst|$dst, $src2}",
+ [(set VR128:$dst, (int_x86_sse2_psll_w VR128:$src1,
+ (bc_v8i16 (v4i32 (scalar_to_vector (i32 imm:$src2))))))]>;
+def PSLLDri : PDIi8<0x72, MRM6r, (outs VR128:$dst),
+ (ins VR128:$src1, i32i8imm:$src2),
+ "pslld\t{$src2, $dst|$dst, $src2}",
+ [(set VR128:$dst, (int_x86_sse2_psll_d VR128:$src1,
+ (scalar_to_vector (i32 imm:$src2))))]>;
+def PSLLQri : PDIi8<0x73, MRM6r, (outs VR128:$dst),
+ (ins VR128:$src1, i32i8imm:$src2),
+ "psllq\t{$src2, $dst|$dst, $src2}",
+ [(set VR128:$dst, (int_x86_sse2_psll_q VR128:$src1,
+ (bc_v2i64 (v4i32 (scalar_to_vector (i32 imm:$src2))))))]>;
+
+def PSRLWri : PDIi8<0x71, MRM2r, (outs VR128:$dst),
+ (ins VR128:$src1, i32i8imm:$src2),
+ "psrlw\t{$src2, $dst|$dst, $src2}",
+ [(set VR128:$dst, (int_x86_sse2_psrl_w VR128:$src1,
+ (bc_v8i16 (v4i32 (scalar_to_vector (i32 imm:$src2))))))]>;
+def PSRLDri : PDIi8<0x72, MRM2r, (outs VR128:$dst),
+ (ins VR128:$src1, i32i8imm:$src2),
+ "psrld\t{$src2, $dst|$dst, $src2}",
+ [(set VR128:$dst, (int_x86_sse2_psrl_d VR128:$src1,
+ (scalar_to_vector (i32 imm:$src2))))]>;
+def PSRLQri : PDIi8<0x73, MRM2r, (outs VR128:$dst),
+ (ins VR128:$src1, i32i8imm:$src2),
+ "psrlq\t{$src2, $dst|$dst, $src2}",
+ [(set VR128:$dst, (int_x86_sse2_psrl_q VR128:$src1,
+ (bc_v2i64 (v4i32 (scalar_to_vector (i32 imm:$src2))))))]>;
+
+// Arithmetic right shifts exist only for word and dword elements (see the
+// PSRAQ note below).
+def PSRAWri : PDIi8<0x71, MRM4r, (outs VR128:$dst),
+ (ins VR128:$src1, i32i8imm:$src2),
+ "psraw\t{$src2, $dst|$dst, $src2}",
+ [(set VR128:$dst, (int_x86_sse2_psra_w VR128:$src1,
+ (bc_v8i16 (v4i32 (scalar_to_vector (i32 imm:$src2))))))]>;
+def PSRADri : PDIi8<0x72, MRM4r, (outs VR128:$dst),
+ (ins VR128:$src1, i32i8imm:$src2),
+ "psrad\t{$src2, $dst|$dst, $src2}",
+ [(set VR128:$dst, (int_x86_sse2_psra_d VR128:$src1,
+ (scalar_to_vector (i32 imm:$src2))))]>;
-defm PSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "psraw", int_x86_sse2_psra_w>;
-defm PSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "psrad", int_x86_sse2_psra_d>;
// PSRAQ doesn't exist in SSE[1-3].
// 128-bit logical shifts.
// fextend of an f32 load folds into cvtss2sd's memory form.
def : Pat<(fextend (loadf32 addr:$src)),
 (CVTSS2SDrm addr:$src)>;
// Removed: SSE2 scalar-to-vector patterns for the deleted X86s2vec node.
// NOTE(review): unlike the MMX ones above, these are not rewritten to
// bc_* + scalar_to_vector here — verify equivalent v8i16/v16i8 patterns
// exist elsewhere or that MOVDI2PDIrr's own pattern now covers them.
-// Scalar to v8i16 / v16i8. The source may be a GR32, but only the lower 8 or
-// 16-bits matter.
-def : Pat<(v8i16 (X86s2vec GR32:$src)), (MOVDI2PDIrr GR32:$src)>,
- Requires<[HasSSE2]>;
-def : Pat<(v16i8 (X86s2vec GR32:$src)), (MOVDI2PDIrr GR32:$src)>,
- Requires<[HasSSE2]>;
-
-
// bit_convert
let Predicates = [HasSSE2] in {
def : Pat<(v2i64 (bitconvert (v4i32 VR128:$src))), (v2i64 VR128:$src)>;