[SDNPHasChain, SDNPOutFlag]>;
def X86ucomi : SDNode<"X86ISD::UCOMI", SDTX86CmpTest,
[SDNPHasChain, SDNPOutFlag]>;
+def X86comi_new : SDNode<"X86ISD::COMI_NEW", SDTX86CmpTest,
+                         [SDNPHasChain]>;
+def X86ucomi_new : SDNode<"X86ISD::UCOMI_NEW", SDTX86CmpTest>;
def X86s2vec : SDNode<"X86ISD::S2VEC", SDTypeProfile<1, 1, []>, []>;
def X86pextrw : SDNode<"X86ISD::PEXTRW", SDTypeProfile<1, 2, []>, []>;
def X86pinsrw : SDNode<"X86ISD::PINSRW", SDTypeProfile<1, 3, []>, []>;
Requires<[HasSSE1]>;
def IMPLICIT_DEF_FR32 : I<0, Pseudo, (outs FR32:$dst), (ins),
"#IMPLICIT_DEF $dst",
- [(set FR32:$dst, (undef))]>, Requires<[HasSSE2]>;
+ [(set FR32:$dst, (undef))]>, Requires<[HasSSE1]>;
def IMPLICIT_DEF_FR64 : I<0, Pseudo, (outs FR64:$dst), (ins),
"#IMPLICIT_DEF $dst",
[(set FR64:$dst, (undef))]>, Requires<[HasSSE2]>;
def memopfsf64 : PatFrag<(ops node:$ptr), (f64 (memop node:$ptr))>;
def memopv4f32 : PatFrag<(ops node:$ptr), (v4f32 (memop node:$ptr))>;
def memopv2f64 : PatFrag<(ops node:$ptr), (v2f64 (memop node:$ptr))>;
-def memopv8i8 : PatFrag<(ops node:$ptr), (v8i8 (memop node:$ptr))>;
-def memopv16i8 : PatFrag<(ops node:$ptr), (v16i8 (memop node:$ptr))>;
-def memopv4i16 : PatFrag<(ops node:$ptr), (v4i16 (memop node:$ptr))>;
-def memopv8i16 : PatFrag<(ops node:$ptr), (v8i16 (memop node:$ptr))>;
-def memopv2i32 : PatFrag<(ops node:$ptr), (v2i32 (memop node:$ptr))>;
def memopv4i32 : PatFrag<(ops node:$ptr), (v4i32 (memop node:$ptr))>;
def memopv2i64 : PatFrag<(ops node:$ptr), (v2i64 (memop node:$ptr))>;
+// SSSE3 has instructions that operate on the MMX registers; their 64-bit
+// memory operands are not guaranteed to be aligned on a 16-byte boundary,
+// so these fragments require only 8-byte alignment.
+def memop64 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
+ if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
+ return LD->getExtensionType() == ISD::NON_EXTLOAD &&
+ LD->getAddressingMode() == ISD::UNINDEXED &&
+ LD->getAlignment() >= 8;
+ return false;
+}]>;
+
+def memopv8i8 : PatFrag<(ops node:$ptr), (v8i8 (memop64 node:$ptr))>;
+def memopv16i8 : PatFrag<(ops node:$ptr), (v16i8 (memop64 node:$ptr))>;
+def memopv4i16 : PatFrag<(ops node:$ptr), (v4i16 (memop64 node:$ptr))>;
+def memopv8i16 : PatFrag<(ops node:$ptr), (v8i16 (memop64 node:$ptr))>;
+def memopv2i32 : PatFrag<(ops node:$ptr), (v2i32 (memop64 node:$ptr))>;
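+// A 64-bit (MMX-register) SSSE3 load pattern would be written against one of
+// these fragments; a sketch, not a definition added by this patch:
+//   [(set VR64:$dst,
+//     (int_x86_ssse3_pabs_b (bitconvert (memopv8i8 addr:$src))))]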
+
def bc_v4f32 : PatFrag<(ops node:$in), (v4f32 (bitconvert node:$in))>;
def bc_v2f64 : PatFrag<(ops node:$in), (v2f64 (bitconvert node:$in))>;
def bc_v16i8 : PatFrag<(ops node:$in), (v16i8 (bitconvert node:$in))>;
// CMOV* - Used to implement the SSE SELECT DAG operation. Expanded by the
// scheduler into a branch sequence.
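// The custom inserter expands each of these pseudos into a branch diamond,
// roughly as follows (a sketch; block names are illustrative):
//   thisMBB:
//     %TrueValue = ...
//     jCC sinkMBB               ; conditional branch on $cond
//   copy0MBB:                   ; fallthrough from thisMBB
//     %FalseValue = ...
//   sinkMBB:
//     %dst = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]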
-let usesCustomDAGSchedInserter = 1 in { // Expanded by the scheduler.
+// These are expanded by the scheduler.
+let Uses = [EFLAGS], usesCustomDAGSchedInserter = 1 in {
def CMOV_FR32 : I<0, Pseudo,
(outs FR32:$dst), (ins FR32:$t, FR32:$f, i8imm:$cond),
"#CMOV_FR32 PSEUDO!",
"#CMOV_V2I64 PSEUDO!",
[(set VR128:$dst,
(v2i64 (X86cmov VR128:$t, VR128:$f, imm:$cond)))]>;
+
+ def NEW_CMOV_FR32 : I<0, Pseudo,
+ (outs FR32:$dst), (ins FR32:$t, FR32:$f, i8imm:$cond),
+ "#CMOV_FR32 PSEUDO!",
+ [(set FR32:$dst, (X86cmov_new FR32:$t, FR32:$f, imm:$cond,
+ EFLAGS))]>;
+ def NEW_CMOV_FR64 : I<0, Pseudo,
+ (outs FR64:$dst), (ins FR64:$t, FR64:$f, i8imm:$cond),
+ "#CMOV_FR64 PSEUDO!",
+ [(set FR64:$dst, (X86cmov_new FR64:$t, FR64:$f, imm:$cond,
+ EFLAGS))]>;
+ def NEW_CMOV_V4F32 : I<0, Pseudo,
+ (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
+ "#CMOV_V4F32 PSEUDO!",
+ [(set VR128:$dst,
+ (v4f32 (X86cmov_new VR128:$t, VR128:$f, imm:$cond,
+ EFLAGS)))]>;
+ def NEW_CMOV_V2F64 : I<0, Pseudo,
+ (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
+ "#CMOV_V2F64 PSEUDO!",
+ [(set VR128:$dst,
+ (v2f64 (X86cmov_new VR128:$t, VR128:$f, imm:$cond,
+ EFLAGS)))]>;
+ def NEW_CMOV_V2I64 : I<0, Pseudo,
+ (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
+ "#CMOV_V2I64 PSEUDO!",
+ [(set VR128:$dst,
+ (v2i64 (X86cmov_new VR128:$t, VR128:$f, imm:$cond,
+ EFLAGS)))]>;
}
//===----------------------------------------------------------------------===//
// Move Instructions
def MOVSSrr : SSI<0x10, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
"movss\t{$src, $dst|$dst, $src}", []>;
-let isReMaterializable = 1 in
+let isLoad = 1, isReMaterializable = 1 in
def MOVSSrm : SSI<0x10, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
"movss\t{$src, $dst|$dst, $src}",
[(set FR32:$dst, (loadf32 addr:$src))]>;
"cmp${cc}ss\t{$src, $dst|$dst, $src}", []>;
}
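+// Comparisons that set EFLAGS. Defs = [EFLAGS] records the implicit
+// physical-register def; in the NEW_* patterns, (implicit EFLAGS) marks that
+// def so the flag result of X86cmp_new can be matched to it.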
+let Defs = [EFLAGS] in {
def UCOMISSrr: PSI<0x2E, MRMSrcReg, (outs), (ins FR32:$src1, FR32:$src2),
"ucomiss\t{$src2, $src1|$src1, $src2}",
[(X86cmp FR32:$src1, FR32:$src2)]>;
"ucomiss\t{$src2, $src1|$src1, $src2}",
[(X86cmp FR32:$src1, (loadf32 addr:$src2))]>;
+def NEW_UCOMISSrr: PSI<0x2E, MRMSrcReg, (outs), (ins FR32:$src1, FR32:$src2),
+ "ucomiss\t{$src2, $src1|$src1, $src2}",
+ [(X86cmp_new FR32:$src1, FR32:$src2), (implicit EFLAGS)]>;
+def NEW_UCOMISSrm: PSI<0x2E, MRMSrcMem, (outs), (ins FR32:$src1, f32mem:$src2),
+ "ucomiss\t{$src2, $src1|$src1, $src2}",
+ [(X86cmp_new FR32:$src1, (loadf32 addr:$src2)),
+ (implicit EFLAGS)]>;
+} // Defs = [EFLAGS]
+
// Aliases to match intrinsics which expect XMM operand(s).
let isTwoAddress = 1 in {
def Int_CMPSSrr : SSI<0xC2, MRMSrcReg,
(load addr:$src), imm:$cc))]>;
}
+let Defs = [EFLAGS] in {
def Int_UCOMISSrr: PSI<0x2E, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
"ucomiss\t{$src2, $src1|$src1, $src2}",
[(X86ucomi (v4f32 VR128:$src1), VR128:$src2)]>;
"comiss\t{$src2, $src1|$src1, $src2}",
[(X86comi (v4f32 VR128:$src1), (load addr:$src2))]>;
+def NEW_Int_UCOMISSrr: PSI<0x2E, MRMSrcReg, (outs),
+ (ins VR128:$src1, VR128:$src2),
+ "ucomiss\t{$src2, $src1|$src1, $src2}",
+ [(X86ucomi_new (v4f32 VR128:$src1), VR128:$src2),
+ (implicit EFLAGS)]>;
+def NEW_Int_UCOMISSrm: PSI<0x2E, MRMSrcMem, (outs),
+ (ins VR128:$src1, f128mem:$src2),
+ "ucomiss\t{$src2, $src1|$src1, $src2}",
+ [(X86ucomi_new (v4f32 VR128:$src1), (load addr:$src2)),
+ (implicit EFLAGS)]>;
+
+def NEW_Int_COMISSrr: PSI<0x2F, MRMSrcReg, (outs),
+ (ins VR128:$src1, VR128:$src2),
+ "comiss\t{$src2, $src1|$src1, $src2}",
+ [(X86comi_new (v4f32 VR128:$src1), VR128:$src2),
+ (implicit EFLAGS)]>;
+def NEW_Int_COMISSrm: PSI<0x2F, MRMSrcMem, (outs),
+ (ins VR128:$src1, f128mem:$src2),
+ "comiss\t{$src2, $src1|$src1, $src2}",
+ [(X86comi_new (v4f32 VR128:$src1), (load addr:$src2)),
+ (implicit EFLAGS)]>;
+} // Defs = [EFLAGS]
+
// Aliases of packed SSE1 instructions for scalar use. These all have names that
// start with 'Fs'.
// Alias instructions that map fld0 to pxor for sse.
+let isReMaterializable = 1 in
def FsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins),
"pxor\t$dst, $dst", [(set FR32:$dst, fp32imm0)]>,
Requires<[HasSSE1]>, TB, OpSize;
// Alias instruction to load FR32 from f128mem using movaps. Upper bits are
// disregarded.
+let isLoad = 1 in
def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
"movaps\t{$src, $dst|$dst, $src}",
[(set FR32:$dst, (alignedloadfsf32 addr:$src))]>;
// Move Instructions
def MOVAPSrr : PSI<0x28, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"movaps\t{$src, $dst|$dst, $src}", []>;
-let isReMaterializable = 1 in
+let isLoad = 1, isReMaterializable = 1 in
def MOVAPSrm : PSI<0x28, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"movaps\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (alignedloadv4f32 addr:$src))]>;
def MOVUPSrr : PSI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"movups\t{$src, $dst|$dst, $src}", []>;
+let isLoad = 1 in
def MOVUPSrm : PSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"movups\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (loadv4f32 addr:$src))]>;
[(store (v4f32 VR128:$src), addr:$dst)]>;
// Intrinsic forms of MOVUPS load and store
+let isLoad = 1 in
def MOVUPSrm_Int : PSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"movups\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse_loadu_ps addr:$src))]>;
// Move Instructions
def MOVSDrr : SDI<0x10, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
"movsd\t{$src, $dst|$dst, $src}", []>;
-let isReMaterializable = 1 in
+let isLoad = 1, isReMaterializable = 1 in
def MOVSDrm : SDI<0x10, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
"movsd\t{$src, $dst|$dst, $src}",
[(set FR64:$dst, (loadf64 addr:$src))]>;
"cmp${cc}sd\t{$src, $dst|$dst, $src}", []>;
}
+let Defs = [EFLAGS] in {
def UCOMISDrr: PDI<0x2E, MRMSrcReg, (outs), (ins FR64:$src1, FR64:$src2),
"ucomisd\t{$src2, $src1|$src1, $src2}",
[(X86cmp FR64:$src1, FR64:$src2)]>;
"ucomisd\t{$src2, $src1|$src1, $src2}",
[(X86cmp FR64:$src1, (loadf64 addr:$src2))]>;
+def NEW_UCOMISDrr: PDI<0x2E, MRMSrcReg, (outs), (ins FR64:$src1, FR64:$src2),
+ "ucomisd\t{$src2, $src1|$src1, $src2}",
+ [(X86cmp_new FR64:$src1, FR64:$src2), (implicit EFLAGS)]>;
+def NEW_UCOMISDrm: PDI<0x2E, MRMSrcMem, (outs), (ins FR64:$src1, f64mem:$src2),
+ "ucomisd\t{$src2, $src1|$src1, $src2}",
+ [(X86cmp_new FR64:$src1, (loadf64 addr:$src2)),
+ (implicit EFLAGS)]>;
+} // Defs = [EFLAGS]
+
// Aliases to match intrinsics which expect XMM operand(s).
let isTwoAddress = 1 in {
def Int_CMPSDrr : SDI<0xC2, MRMSrcReg,
(load addr:$src), imm:$cc))]>;
}
+let Defs = [EFLAGS] in {
def Int_UCOMISDrr: PDI<0x2E, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
"ucomisd\t{$src2, $src1|$src1, $src2}",
[(X86ucomi (v2f64 VR128:$src1), (v2f64 VR128:$src2))]>;
"comisd\t{$src2, $src1|$src1, $src2}",
[(X86comi (v2f64 VR128:$src1), (load addr:$src2))]>;
+def NEW_Int_UCOMISDrr: PDI<0x2E, MRMSrcReg, (outs),
+ (ins VR128:$src1, VR128:$src2),
+ "ucomisd\t{$src2, $src1|$src1, $src2}",
+ [(X86ucomi_new (v2f64 VR128:$src1), (v2f64 VR128:$src2)),
+ (implicit EFLAGS)]>;
+def NEW_Int_UCOMISDrm: PDI<0x2E, MRMSrcMem, (outs),
+ (ins VR128:$src1, f128mem:$src2),
+ "ucomisd\t{$src2, $src1|$src1, $src2}",
+ [(X86ucomi_new (v2f64 VR128:$src1), (load addr:$src2)),
+ (implicit EFLAGS)]>;
+
+def NEW_Int_COMISDrr: PDI<0x2F, MRMSrcReg, (outs),
+ (ins VR128:$src1, VR128:$src2),
+ "comisd\t{$src2, $src1|$src1, $src2}",
+ [(X86comi_new (v2f64 VR128:$src1), (v2f64 VR128:$src2)),
+ (implicit EFLAGS)]>;
+def NEW_Int_COMISDrm: PDI<0x2F, MRMSrcMem, (outs),
+ (ins VR128:$src1, f128mem:$src2),
+ "comisd\t{$src2, $src1|$src1, $src2}",
+ [(X86comi_new (v2f64 VR128:$src1), (load addr:$src2)),
+ (implicit EFLAGS)]>;
+} // Defs = [EFLAGS]
+
// Aliases of packed SSE2 instructions for scalar use. These all have names that
// start with 'Fs'.
// Alias instructions that map fld0 to pxor for sse.
+let isReMaterializable = 1 in
def FsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins),
"pxor\t$dst, $dst", [(set FR64:$dst, fpimm0)]>,
Requires<[HasSSE2]>, TB, OpSize;
// Alias instruction to load FR64 from f128mem using movapd. Upper bits are
// disregarded.
+let isLoad = 1 in
def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
"movapd\t{$src, $dst|$dst, $src}",
[(set FR64:$dst, (alignedloadfsf64 addr:$src))]>;
// Move Instructions
def MOVAPDrr : PDI<0x28, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"movapd\t{$src, $dst|$dst, $src}", []>;
-let isReMaterializable = 1 in
+let isLoad = 1, isReMaterializable = 1 in
def MOVAPDrm : PDI<0x28, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"movapd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (alignedloadv2f64 addr:$src))]>;
def MOVUPDrr : PDI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"movupd\t{$src, $dst|$dst, $src}", []>;
+let isLoad = 1 in
def MOVUPDrm : PDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"movupd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (loadv2f64 addr:$src))]>;
// Move Instructions
def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"movdqa\t{$src, $dst|$dst, $src}", []>;
+let isLoad = 1 in
def MOVDQArm : PDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
"movdqa\t{$src, $dst|$dst, $src}",
[/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/]>;
def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
"movdqa\t{$src, $dst|$dst, $src}",
[/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/]>;
+let isLoad = 1 in
def MOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
"movdqu\t{$src, $dst|$dst, $src}",
[/*(set VR128:$dst, (loadv2i64 addr:$src))*/]>,
XS, Requires<[HasSSE2]>;
// Intrinsic forms of MOVDQU load and store
+let isLoad = 1 in
def MOVDQUrm_Int : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
"movdqu\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_loadu_dq addr:$src))]>,
[(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>;
// Conditional store
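+// MASKMOVDQU stores the byte-masked contents of $src to the address in EDI,
+// so the implicit pointer register is modeled with Uses = [EDI].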
+let Uses = [EDI] in
def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
"maskmovdqu\t{$mask, $src|$src, $mask}",
- [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>,
- Imp<[EDI],[]>;
+ [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>;
// Non-temporal stores
def MOVNTPDmr : PDI<0x2B, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
// SSSE3 Instructions
//===----------------------------------------------------------------------===//
-// SSE3 Instruction Templates:
+// SSSE3 Instruction Templates:
//
// SS38I - SSSE3 instructions with T8 prefix.
// SS3AI - SSSE3 instructions with TA prefix.
+//
+// Note: SSSE3 instructions come in 64-bit and 128-bit versions. The 64-bit
+// versions use the MMX registers; they are defined here rather than with the
+// MMX instructions because they fit the SSSE3 category better.
class SS38I<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
int_x86_ssse3_psign_d_128>;
let isTwoAddress = 1 in {
- def PALIGN64rr : SS38I<0x0F, MRMSrcReg, (outs VR64:$dst),
- (ins VR64:$src1, VR64:$src2, i16imm:$src3),
- "palignr\t{$src2, $dst|$dst, $src2}",
- [(set VR64:$dst,
- (int_x86_ssse3_palign_r
- VR64:$src1, VR64:$src2,
- imm:$src3))]>;
- def PALIGN64rm : SS38I<0x0F, MRMSrcReg, (outs VR64:$dst),
- (ins VR64:$src1, i64mem:$src2, i16imm:$src3),
- "palignr\t{$src2, $dst|$dst, $src2}",
- [(set VR64:$dst,
- (int_x86_ssse3_palign_r
- VR64:$src1,
- (bitconvert (memopv2i32 addr:$src2)),
- imm:$src3))]>;
-
- def PALIGN128rr : SS38I<0x0F, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2, i32imm:$src3),
- "palignr\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (int_x86_ssse3_palign_r_128
- VR128:$src1, VR128:$src2,
- imm:$src3))]>, OpSize;
- def PALIGN128rm : SS38I<0x0F, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, i128mem:$src2, i32imm:$src3),
- "palignr\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (int_x86_ssse3_palign_r_128
- VR128:$src1,
- (bitconvert (memopv4i32 addr:$src2)),
- imm:$src3))]>, OpSize;
+  def PALIGNR64rr  : SS3AI<0x0F, MRMSrcReg, (outs VR64:$dst),
+                           (ins VR64:$src1, VR64:$src2, i16imm:$src3),
+                           "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
+                           [(set VR64:$dst,
+                             (int_x86_ssse3_palign_r
+                              VR64:$src1, VR64:$src2,
+                              imm:$src3))]>;
+  def PALIGNR64rm  : SS3AI<0x0F, MRMSrcMem, (outs VR64:$dst),
+                           (ins VR64:$src1, i64mem:$src2, i16imm:$src3),
+                           "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
+                           [(set VR64:$dst,
+                             (int_x86_ssse3_palign_r
+                              VR64:$src1,
+                              (bitconvert (memopv2i32 addr:$src2)),
+                              imm:$src3))]>;
+
+  def PALIGNR128rr : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst),
+                           (ins VR128:$src1, VR128:$src2, i32imm:$src3),
+                           "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
+                           [(set VR128:$dst,
+                             (int_x86_ssse3_palign_r_128
+                              VR128:$src1, VR128:$src2,
+                              imm:$src3))]>, OpSize;
+  def PALIGNR128rm : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
+                           (ins VR128:$src1, i128mem:$src2, i32imm:$src3),
+                           "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
+                           [(set VR128:$dst,
+                             (int_x86_ssse3_palign_r_128
+                              VR128:$src1,
+                              (bitconvert (memopv4i32 addr:$src2)),
+                              imm:$src3))]>, OpSize;
}
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
// 128-bit vector undefs.
+def : Pat<(v4f32 (undef)), (IMPLICIT_DEF_VR128)>, Requires<[HasSSE1]>;
def : Pat<(v2f64 (undef)), (IMPLICIT_DEF_VR128)>, Requires<[HasSSE2]>;
def : Pat<(v16i8 (undef)), (IMPLICIT_DEF_VR128)>, Requires<[HasSSE2]>;
def : Pat<(v8i16 (undef)), (IMPLICIT_DEF_VR128)>, Requires<[HasSSE2]>;