setOperationAction(ISD::CONCAT_VECTORS, MVT::v64i1, Legal);
setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v32i1, Custom);
setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v64i1, Custom);
+ setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v32i16, Custom);
+ setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v64i8, Custom);
+ setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v32i16, Custom);
+ setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v64i8, Custom);
setOperationAction(ISD::SELECT, MVT::v32i1, Custom);
setOperationAction(ISD::SELECT, MVT::v64i1, Custom);
setOperationAction(ISD::SIGN_EXTEND, MVT::v32i8, Custom);
setOperationAction(ISD::ZERO_EXTEND, MVT::v64i8, Custom);
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v32i1, Custom);
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v64i1, Custom);
+ setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v32i16, Custom);
+ setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v64i8, Custom);
setOperationAction(ISD::VSELECT, MVT::v32i16, Legal);
setOperationAction(ISD::VSELECT, MVT::v64i8, Legal);
setOperationAction(ISD::TRUNCATE, MVT::v32i1, Custom);
From.RC:$src1, imm:$idx)>;
}
-// This multiclass generates patterns for matching vextract with common types
-// (X86VectorVTInfo From , X86VectorVTInfo To) and alternative types
-// (X86VectorVTInfo AltFrom, X86VectorVTInfo AltTo)
-multiclass vextract_for_size_all<int Opcode,
- X86VectorVTInfo From, X86VectorVTInfo To,
- X86VectorVTInfo AltFrom, X86VectorVTInfo AltTo,
- PatFrag vextract_extract,
- SDNodeXForm EXTRACT_get_vextract_imm> :
- vextract_for_size<Opcode, From, To, vextract_extract>,
- vextract_for_size_first_position_lowering<AltFrom, AltTo> {
-
- // Codegen pattern with the alternative types.
- // Only add this if operation not supported natively via AVX512DQ
- let Predicates = [NoDQI] in
- def : Pat<(vextract_extract:$ext (AltFrom.VT AltFrom.RC:$src1), (iPTR imm)),
- (AltTo.VT (!cast<Instruction>(NAME # To.EltSize # "x" #
- To.NumElts # From.ZSuffix # "rr")
- AltFrom.RC:$src1,
- (EXTRACT_get_vextract_imm To.RC:$ext)))>;
+// Codegen pattern for the alternative types
+multiclass vextract_for_size_lowering<string InstrStr, X86VectorVTInfo From,
+ X86VectorVTInfo To, PatFrag vextract_extract,
+ SDNodeXForm EXTRACT_get_vextract_imm, list<Predicate> p> :
+ vextract_for_size_first_position_lowering<From, To> {
+
+ let Predicates = p in
+ def : Pat<(vextract_extract:$ext (From.VT From.RC:$src1), (iPTR imm)),
+ (To.VT (!cast<Instruction>(InstrStr#"rr")
+ From.RC:$src1,
+ (EXTRACT_get_vextract_imm To.RC:$ext)))>;
}
multiclass vextract_for_type<ValueType EltVT32, int Opcode128,
- ValueType EltVT64, int Opcode256> {
- defm NAME # "32x4Z" : vextract_for_size_all<Opcode128,
+ ValueType EltVT64, int Opcode256> {
+ defm NAME # "32x4Z" : vextract_for_size<Opcode128,
X86VectorVTInfo<16, EltVT32, VR512>,
X86VectorVTInfo< 4, EltVT32, VR128X>,
- X86VectorVTInfo< 8, EltVT64, VR512>,
- X86VectorVTInfo< 2, EltVT64, VR128X>,
- vextract128_extract,
- EXTRACT_get_vextract128_imm>,
+ vextract128_extract>,
EVEX_V512, EVEX_CD8<32, CD8VT4>;
- defm NAME # "64x4Z" : vextract_for_size_all<Opcode256,
+ defm NAME # "64x4Z" : vextract_for_size<Opcode256,
X86VectorVTInfo< 8, EltVT64, VR512>,
X86VectorVTInfo< 4, EltVT64, VR256X>,
- X86VectorVTInfo<16, EltVT32, VR512>,
- X86VectorVTInfo< 8, EltVT32, VR256>,
- vextract256_extract,
- EXTRACT_get_vextract256_imm>,
+ vextract256_extract>,
VEX_W, EVEX_V512, EVEX_CD8<64, CD8VT4>;
let Predicates = [HasVLX] in
- defm NAME # "32x4Z256" : vextract_for_size_all<Opcode128,
+ defm NAME # "32x4Z256" : vextract_for_size<Opcode128,
X86VectorVTInfo< 8, EltVT32, VR256X>,
X86VectorVTInfo< 4, EltVT32, VR128X>,
- X86VectorVTInfo< 4, EltVT64, VR256X>,
- X86VectorVTInfo< 2, EltVT64, VR128X>,
- vextract128_extract,
- EXTRACT_get_vextract128_imm>,
+ vextract128_extract>,
EVEX_V256, EVEX_CD8<32, CD8VT4>;
let Predicates = [HasVLX, HasDQI] in
defm NAME # "64x2Z256" : vextract_for_size<Opcode128,
defm VEXTRACTF : vextract_for_type<f32, 0x19, f64, 0x1b>;
defm VEXTRACTI : vextract_for_type<i32, 0x39, i64, 0x3b>;
+// extract_subvector codegen patterns with the alternative types.
+// Only add these if 64x2 and friends are not supported natively via AVX512DQ.
+defm : vextract_for_size_lowering<"VEXTRACTF32x4Z", v8f64_info, v2f64x_info,
+ vextract128_extract, EXTRACT_get_vextract128_imm, [HasAVX512, NoDQI]>;
+defm : vextract_for_size_lowering<"VEXTRACTI32x4Z", v8i64_info, v2i64x_info,
+ vextract128_extract, EXTRACT_get_vextract128_imm, [HasAVX512, NoDQI]>;
+
+defm : vextract_for_size_lowering<"VEXTRACTF64x4Z", v16f32_info, v8f32x_info,
+ vextract128_extract, EXTRACT_get_vextract128_imm, [HasAVX512, NoDQI]>;
+defm : vextract_for_size_lowering<"VEXTRACTI64x4Z", v16i32_info, v8i32x_info,
+ vextract256_extract, EXTRACT_get_vextract256_imm, [HasAVX512, NoDQI]>;
+
+defm : vextract_for_size_lowering<"VEXTRACTF32x4Z256", v4f64x_info, v2f64x_info,
+ vextract128_extract, EXTRACT_get_vextract128_imm, [HasVLX, NoDQI]>;
+defm : vextract_for_size_lowering<"VEXTRACTI32x4Z256", v4i64x_info, v2i64x_info,
+ vextract128_extract, EXTRACT_get_vextract128_imm, [HasVLX, NoDQI]>;
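+
+// For example (an illustrative sketch, not part of the patch): without
+// AVX512DQ there is no VEXTRACTF64x2, so
+//   (v2f64 (extract_subvector (v8f64 VR512:$src), (iPTR 2)))
+// is matched by the v8f64_info/v2f64x_info pattern above and emitted as
+//   vextractf32x4 $1, %zmm0, %xmm0
+// with the element index rescaled to a 128-bit chunk index by
+// EXTRACT_get_vextract128_imm.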
+
+// Codegen patterns with the alternative types: extract VEC128 from VEC512
+defm : vextract_for_size_lowering<"VEXTRACTI32x4Z", v32i16_info, v8i16x_info,
+ vextract128_extract, EXTRACT_get_vextract128_imm, [HasAVX512]>;
+defm : vextract_for_size_lowering<"VEXTRACTI32x4Z", v64i8_info, v16i8x_info,
+ vextract128_extract, EXTRACT_get_vextract128_imm, [HasAVX512]>;
+// Codegen patterns with the alternative types: extract VEC256 from VEC512
+defm : vextract_for_size_lowering<"VEXTRACTI64x4Z", v32i16_info, v16i16x_info,
+ vextract256_extract, EXTRACT_get_vextract256_imm, [HasAVX512]>;
+defm : vextract_for_size_lowering<"VEXTRACTI64x4Z", v64i8_info, v32i8x_info,
+ vextract256_extract, EXTRACT_get_vextract256_imm, [HasAVX512]>;
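+
+// For example (an illustrative sketch): AVX512BW has no dedicated 16x8
+// extract, so
+//   (v8i16 (extract_subvector (v32i16 VR512:$src), (iPTR 16)))
+// is matched above and emitted as
+//   vextracti32x4 $2, %zmm0, %xmm0
+// exactly as the extract_subvector128_v32i16 codegen test below expects.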
+
// A 128-bit subvector insert to the first 512-bit vector position
// is a subregister copy that needs no instruction.
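// E.g. (v8i64 (insert_subvector undef, (v2i64 VR128X:$src), (iPTR 0)))
// becomes an INSERT_SUBREG into sub_xmm and emits no machine instruction.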
def : Pat<(insert_subvector undef, (v2i64 VR128X:$src), (iPTR 0)),
SSE_INTALU_ITINS_P, HasAVX512>;
defm VPUNPCKHQDQ : avx512_binop_rm_vl_q<0x6D, "vpunpckhqdq", X86Unpckh,
SSE_INTALU_ITINS_P, HasAVX512>;
+
+//===----------------------------------------------------------------------===//
+// AVX-512 - Extract & Insert Integer Instructions
+//===----------------------------------------------------------------------===//
+
+multiclass avx512_extract_elt_bw_m<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ X86VectorVTInfo _> {
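+  // VPEXTRB/VPEXTRW zero-extend the element into a GR32, so the store form
+  // matches the trunc of the assertzext'ed result and folds the extract
+  // directly into a scalar store.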
+ let mayStore = 1 in
+ def mr : AVX512Ii8<opc, MRMDestMem, (outs),
+ (ins _.ScalarMemOp:$dst, _.RC:$src1, u8imm:$src2),
+ OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(store (_.EltVT (trunc (assertzext (OpNode (_.VT _.RC:$src1),
+ imm:$src2)))),
+ addr:$dst)]>,
+ EVEX, EVEX_CD8<_.EltSize, CD8VT1>;
+}
+
+multiclass avx512_extract_elt_b<string OpcodeStr, X86VectorVTInfo _> {
+ let Predicates = [HasBWI] in {
+ def rr : AVX512Ii8<0x14, MRMDestReg, (outs GR32orGR64:$dst),
+ (ins _.RC:$src1, u8imm:$src2),
+ OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set GR32orGR64:$dst,
+ (X86pextrb (_.VT _.RC:$src1), imm:$src2))]>,
+ EVEX, TAPD;
+
+ defm NAME : avx512_extract_elt_bw_m<0x14, OpcodeStr, X86pextrb, _>, TAPD;
+ }
+}
+
+multiclass avx512_extract_elt_w<string OpcodeStr, X86VectorVTInfo _> {
+ let Predicates = [HasBWI] in {
+ def rr : AVX512Ii8<0xC5, MRMSrcReg, (outs GR32orGR64:$dst),
+ (ins _.RC:$src1, u8imm:$src2),
+ OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set GR32orGR64:$dst,
+ (X86pextrw (_.VT _.RC:$src1), imm:$src2))]>,
+ EVEX, PD;
+
+ defm NAME : avx512_extract_elt_bw_m<0x15, OpcodeStr, X86pextrw, _>, TAPD;
+ }
+}
+
+multiclass avx512_extract_elt_dq<string OpcodeStr, X86VectorVTInfo _,
+ RegisterClass GRC> {
+ let Predicates = [HasDQI] in {
+ def rr : AVX512Ii8<0x16, MRMDestReg, (outs GRC:$dst),
+ (ins _.RC:$src1, u8imm:$src2),
+ OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set GRC:$dst,
+ (extractelt (_.VT _.RC:$src1), imm:$src2))]>,
+ EVEX, TAPD;
+
+ let mayStore = 1 in
+ def mr : AVX512Ii8<0x16, MRMDestMem, (outs),
+ (ins _.ScalarMemOp:$dst, _.RC:$src1, u8imm:$src2),
+ OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(store (extractelt (_.VT _.RC:$src1),
+ imm:$src2),addr:$dst)]>,
+ EVEX, EVEX_CD8<_.EltSize, CD8VT1>, TAPD;
+ }
+}
+
+defm VPEXTRBZ : avx512_extract_elt_b<"vpextrb", v16i8x_info>;
+defm VPEXTRWZ : avx512_extract_elt_w<"vpextrw", v8i16x_info>;
+defm VPEXTRDZ : avx512_extract_elt_dq<"vpextrd", v4i32x_info, GR32>;
+defm VPEXTRQZ : avx512_extract_elt_dq<"vpextrq", v2i64x_info, GR64>, VEX_W;
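+
+// For example (illustrative): "vpextrw $2, %xmm21, %eax" can now select the
+// EVEX-encoded VPEXTRWZrr; the vpextrd/vpextrq forms additionally require
+// AVX512DQ per the HasDQI predicate above.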
+
+multiclass avx512_insert_elt_m<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ X86VectorVTInfo _, PatFrag LdFrag> {
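+  // Memory form: the scalar is loaded with LdFrag (an extending load for the
+  // byte/word variants) and inserted at the immediate index.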
+ def rm : AVX512Ii8<opc, MRMSrcMem, (outs _.RC:$dst),
+ (ins _.RC:$src1, _.ScalarMemOp:$src2, u8imm:$src3),
+ OpcodeStr#"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
+ [(set _.RC:$dst,
+ (_.VT (OpNode _.RC:$src1, (LdFrag addr:$src2), imm:$src3)))]>,
+ EVEX_4V, EVEX_CD8<_.EltSize, CD8VT1>;
+}
+
+multiclass avx512_insert_elt_bw<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ X86VectorVTInfo _, PatFrag LdFrag> {
+ let Predicates = [HasBWI] in {
+ def rr : AVX512Ii8<opc, MRMSrcReg, (outs _.RC:$dst),
+ (ins _.RC:$src1, GR32orGR64:$src2, u8imm:$src3),
+ OpcodeStr#"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
+ [(set _.RC:$dst,
+ (OpNode _.RC:$src1, GR32orGR64:$src2, imm:$src3))]>, EVEX_4V;
+
+ defm NAME : avx512_insert_elt_m<opc, OpcodeStr, OpNode, _, LdFrag>;
+ }
+}
+
+multiclass avx512_insert_elt_dq<bits<8> opc, string OpcodeStr,
+ X86VectorVTInfo _, RegisterClass GRC> {
+ let Predicates = [HasDQI] in {
+ def rr : AVX512Ii8<opc, MRMSrcReg, (outs _.RC:$dst),
+ (ins _.RC:$src1, GRC:$src2, u8imm:$src3),
+ OpcodeStr#"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
+ [(set _.RC:$dst,
+ (_.VT (insertelt _.RC:$src1, GRC:$src2, imm:$src3)))]>,
+ EVEX_4V, TAPD;
+
+ defm NAME : avx512_insert_elt_m<opc, OpcodeStr, insertelt, _,
+ _.ScalarLdFrag>, TAPD;
+ }
+}
+
+defm VPINSRBZ : avx512_insert_elt_bw<0x20, "vpinsrb", X86pinsrb, v16i8x_info,
+ extloadi8>, TAPD;
+defm VPINSRWZ : avx512_insert_elt_bw<0xC4, "vpinsrw", X86pinsrw, v8i16x_info,
+ extloadi16>, PD;
+defm VPINSRDZ : avx512_insert_elt_dq<0x22, "vpinsrd", v4i32x_info, GR32>;
+defm VPINSRQZ : avx512_insert_elt_dq<0x22, "vpinsrq", v2i64x_info, GR64>, VEX_W;
//===----------------------------------------------------------------------===//
// VSHUFPS - VSHUFPD Operations
//===----------------------------------------------------------------------===//
}
// Extract
-let Predicates = [HasAVX] in
+let Predicates = [HasAVX, NoBWI] in
def VPEXTRWri : Ii8<0xC5, MRMSrcReg,
(outs GR32orGR64:$dst), (ins VR128:$src1, u8imm:$src2),
"vpextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
Sched<[WriteShuffleLd, ReadAfterLd]>;
// Insert
-let Predicates = [HasAVX] in
+let Predicates = [HasAVX, NoBWI] in
defm VPINSRW : sse2_pinsrw<0>, PD, VEX_4V;
let Predicates = [UseSSE2], Constraints = "$src1 = $dst" in
imm:$src2)))), addr:$dst)]>;
}
-let Predicates = [HasAVX] in
+let Predicates = [HasAVX, NoBWI] in
defm VPEXTRB : SS41I_extract8<0x14, "vpextrb">, VEX;
defm PEXTRB : SS41I_extract8<0x14, "pextrb">;
imm:$src2)))), addr:$dst)]>;
}
-let Predicates = [HasAVX] in
+let Predicates = [HasAVX, NoBWI] in
defm VPEXTRW : SS41I_extract16<0x15, "vpextrw">, VEX;
defm PEXTRW : SS41I_extract16<0x15, "pextrw">;
addr:$dst)]>;
}
-let Predicates = [HasAVX] in
+let Predicates = [HasAVX, NoDQI] in
defm VPEXTRD : SS41I_extract32<0x16, "vpextrd">, VEX;
defm PEXTRD : SS41I_extract32<0x16, "pextrd">;
addr:$dst)]>, REX_W;
}
-let Predicates = [HasAVX] in
+let Predicates = [HasAVX, NoDQI] in
defm VPEXTRQ : SS41I_extract64<0x16, "vpextrq">, VEX, VEX_W;
defm PEXTRQ : SS41I_extract64<0x16, "pextrq">;
imm:$src3))]>, Sched<[WriteShuffleLd, ReadAfterLd]>;
}
-let Predicates = [HasAVX] in
+let Predicates = [HasAVX, NoBWI] in
defm VPINSRB : SS41I_insert8<0x20, "vpinsrb", 0>, VEX_4V;
let Constraints = "$src1 = $dst" in
defm PINSRB : SS41I_insert8<0x20, "pinsrb">;
imm:$src3)))]>, Sched<[WriteShuffleLd, ReadAfterLd]>;
}
-let Predicates = [HasAVX] in
+let Predicates = [HasAVX, NoDQI] in
defm VPINSRD : SS41I_insert32<0x22, "vpinsrd", 0>, VEX_4V;
let Constraints = "$src1 = $dst" in
defm PINSRD : SS41I_insert32<0x22, "pinsrd">;
imm:$src3)))]>, Sched<[WriteShuffleLd, ReadAfterLd]>;
}
-let Predicates = [HasAVX] in
+let Predicates = [HasAVX, NoDQI] in
defm VPINSRQ : SS41I_insert64<0x22, "vpinsrq", 0>, VEX_4V, VEX_W;
let Constraints = "$src1 = $dst" in
defm PINSRQ : SS41I_insert64<0x22, "pinsrq">, REX_W;
%shuffle = shufflevector <16 x i16> zeroinitializer, <16 x i16> %a, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 16, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 24>
ret <16 x i16> %shuffle
}
+
+define i64 @extract_v2i64(<2 x i64> %x, i64* %dst) {
+ %r1 = extractelement <2 x i64> %x, i32 0
+ %r2 = extractelement <2 x i64> %x, i32 1
+ store i64 %r2, i64* %dst, align 1
+ ret i64 %r1
+}
+
+define i32 @extract_v4i32(<4 x i32> %x, i32* %dst) {
+ %r1 = extractelement <4 x i32> %x, i32 1
+ %r2 = extractelement <4 x i32> %x, i32 3
+ store i32 %r2, i32* %dst, align 1
+ ret i32 %r1
+}
+
+define i16 @extract_v8i16(<8 x i16> %x, i16* %dst) {
+ %r1 = extractelement <8 x i16> %x, i32 1
+ %r2 = extractelement <8 x i16> %x, i32 3
+ store i16 %r2, i16* %dst, align 1
+ ret i16 %r1
+}
+
+define i8 @extract_v16i8(<16 x i8> %x, i8* %dst) {
+ %r1 = extractelement <16 x i8> %x, i32 1
+ %r2 = extractelement <16 x i8> %x, i32 3
+ store i8 %r2, i8* %dst, align 1
+ ret i8 %r1
+}
+
+define <2 x i64> @insert_v2i64(<2 x i64> %x, i64 %y , i64* %ptr) {
+ %val = load i64, i64* %ptr
+ %r1 = insertelement <2 x i64> %x, i64 %val, i32 1
+ %r2 = insertelement <2 x i64> %r1, i64 %y, i32 3
+ ret <2 x i64> %r2
+}
+
+define <4 x i32> @insert_v4i32(<4 x i32> %x, i32 %y, i32* %ptr) {
+ %val = load i32, i32* %ptr
+ %r1 = insertelement <4 x i32> %x, i32 %val, i32 1
+ %r2 = insertelement <4 x i32> %r1, i32 %y, i32 3
+ ret <4 x i32> %r2
+}
+
+define <8 x i16> @insert_v8i16(<8 x i16> %x, i16 %y, i16* %ptr) {
+ %val = load i16, i16* %ptr
+ %r1 = insertelement <8 x i16> %x, i16 %val, i32 1
+ %r2 = insertelement <8 x i16> %r1, i16 %y, i32 5
+ ret <8 x i16> %r2
+}
+
+define <16 x i8> @insert_v16i8(<16 x i8> %x, i8 %y, i8* %ptr) {
+ %val = load i8, i8* %ptr
+ %r1 = insertelement <16 x i8> %x, i8 %val, i32 3
+ %r2 = insertelement <16 x i8> %r1, i8 %y, i32 10
+ ret <16 x i8> %r2
+}
--- /dev/null
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=skx | FileCheck --check-prefix=SKX %s
+
+define <8 x i16> @extract_subvector128_v32i16(<32 x i16> %x) nounwind {
+; SKX-LABEL: extract_subvector128_v32i16:
+; SKX: ## BB#0:
+; SKX-NEXT: vextracti32x4 $2, %zmm0, %xmm0
+; SKX-NEXT: retq
+ %r1 = shufflevector <32 x i16> %x, <32 x i16> undef, <8 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
+ ret <8 x i16> %r1
+}
+
+define <8 x i16> @extract_subvector128_v32i16_first_element(<32 x i16> %x) nounwind {
+; SKX-LABEL: extract_subvector128_v32i16_first_element:
+; SKX: ## BB#0:
+; SKX-NEXT: retq
+ %r1 = shufflevector <32 x i16> %x, <32 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x i16> %r1
+}
+
+define <16 x i8> @extract_subvector128_v64i8(<64 x i8> %x) nounwind {
+; SKX-LABEL: extract_subvector128_v64i8:
+; SKX: ## BB#0:
+; SKX-NEXT: vextracti32x4 $2, %zmm0, %xmm0
+; SKX-NEXT: retq
+ %r1 = shufflevector <64 x i8> %x, <64 x i8> undef, <16 x i32> <i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38,i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47>
+ ret <16 x i8> %r1
+}
+
+define <16 x i8> @extract_subvector128_v64i8_first_element(<64 x i8> %x) nounwind {
+; SKX-LABEL: extract_subvector128_v64i8_first_element:
+; SKX: ## BB#0:
+; SKX-NEXT: retq
+ %r1 = shufflevector <64 x i8> %x, <64 x i8> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <16 x i8> %r1
+}
+
+define <16 x i16> @extract_subvector256_v32i16(<32 x i16> %x) nounwind {
+; SKX-LABEL: extract_subvector256_v32i16:
+; SKX: ## BB#0:
+; SKX-NEXT: vextracti64x4 $1, %zmm0, %ymm0
+; SKX-NEXT: retq
+ %r1 = shufflevector <32 x i16> %x, <32 x i16> undef, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ ret <16 x i16> %r1
+}
+
+define <32 x i8> @extract_subvector256_v64i8(<64 x i8> %x) nounwind {
+; SKX-LABEL: extract_subvector256_v64i8:
+; SKX: ## BB#0:
+; SKX-NEXT: vextracti64x4 $1, %zmm0, %ymm0
+; SKX-NEXT: retq
+ %r1 = shufflevector <64 x i8> %x, <64 x i8> undef, <32 x i32> <i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ ret <32 x i8> %r1
+}
ret i8 %x2
}
+define i64 @extract_v8i64(<8 x i64> %x, i64* %dst) {
+; SKX-LABEL: extract_v8i64:
+; SKX: ## BB#0:
+; SKX-NEXT: vpextrq $1, %xmm0, %rax
+; SKX-NEXT: vextracti64x2 $1, %zmm0, %xmm0
+; SKX-NEXT: vpextrq $1, %xmm0, (%rdi)
+; SKX-NEXT: retq
+ %r1 = extractelement <8 x i64> %x, i32 1
+ %r2 = extractelement <8 x i64> %x, i32 3
+ store i64 %r2, i64* %dst, align 1
+ ret i64 %r1
+}
+
+define i64 @extract_v4i64(<4 x i64> %x, i64* %dst) {
+; SKX-LABEL: extract_v4i64:
+; SKX: ## BB#0:
+; SKX-NEXT: vpextrq $1, %xmm0, %rax
+; SKX-NEXT: vextracti128 $1, %ymm0, %xmm0
+; SKX-NEXT: vpextrq $1, %xmm0, (%rdi)
+; SKX-NEXT: retq
+ %r1 = extractelement <4 x i64> %x, i32 1
+ %r2 = extractelement <4 x i64> %x, i32 3
+ store i64 %r2, i64* %dst, align 1
+ ret i64 %r1
+}
+
+define i64 @extract_v2i64(<2 x i64> %x, i64* %dst) {
+; SKX-LABEL: extract_v2i64:
+; SKX: ## BB#0:
+; SKX-NEXT: vmovq %xmm0, %rax
+; SKX-NEXT: vpextrq $1, %xmm0, (%rdi)
+; SKX-NEXT: retq
+ %r1 = extractelement <2 x i64> %x, i32 0
+ %r2 = extractelement <2 x i64> %x, i32 1
+ store i64 %r2, i64* %dst, align 1
+ ret i64 %r1
+}
+
+define i32 @extract_v16i32(<16 x i32> %x, i32* %dst) {
+; SKX-LABEL: extract_v16i32:
+; SKX: ## BB#0:
+; SKX-NEXT: vpextrd $1, %xmm0, %eax
+; SKX-NEXT: vextracti32x4 $1, %zmm0, %xmm0
+; SKX-NEXT: vpextrd $1, %xmm0, (%rdi)
+; SKX-NEXT: retq
+ %r1 = extractelement <16 x i32> %x, i32 1
+ %r2 = extractelement <16 x i32> %x, i32 5
+ store i32 %r2, i32* %dst, align 1
+ ret i32 %r1
+}
+
+define i32 @extract_v8i32(<8 x i32> %x, i32* %dst) {
+; SKX-LABEL: extract_v8i32:
+; SKX: ## BB#0:
+; SKX-NEXT: vpextrd $1, %xmm0, %eax
+; SKX-NEXT: vextracti128 $1, %ymm0, %xmm0
+; SKX-NEXT: vpextrd $1, %xmm0, (%rdi)
+; SKX-NEXT: retq
+ %r1 = extractelement <8 x i32> %x, i32 1
+ %r2 = extractelement <8 x i32> %x, i32 5
+ store i32 %r2, i32* %dst, align 1
+ ret i32 %r1
+}
+
+define i32 @extract_v4i32(<4 x i32> %x, i32* %dst) {
+; SKX-LABEL: extract_v4i32:
+; SKX: ## BB#0:
+; SKX-NEXT: vpextrd $1, %xmm0, %eax
+; SKX-NEXT: vpextrd $3, %xmm0, (%rdi)
+; SKX-NEXT: retq
+ %r1 = extractelement <4 x i32> %x, i32 1
+ %r2 = extractelement <4 x i32> %x, i32 3
+ store i32 %r2, i32* %dst, align 1
+ ret i32 %r1
+}
+
+define i16 @extract_v32i16(<32 x i16> %x, i16* %dst) {
+; SKX-LABEL: extract_v32i16:
+; SKX: ## BB#0:
+; SKX-NEXT: vpextrw $1, %xmm0, %eax
+; SKX-NEXT: vextracti32x4 $1, %zmm0, %xmm0
+; SKX-NEXT: vpextrw $1, %xmm0, (%rdi)
+; SKX-NEXT: retq
+ %r1 = extractelement <32 x i16> %x, i32 1
+ %r2 = extractelement <32 x i16> %x, i32 9
+ store i16 %r2, i16* %dst, align 1
+ ret i16 %r1
+}
+
+define i16 @extract_v16i16(<16 x i16> %x, i16* %dst) {
+; SKX-LABEL: extract_v16i16:
+; SKX: ## BB#0:
+; SKX-NEXT: vpextrw $1, %xmm0, %eax
+; SKX-NEXT: vextracti128 $1, %ymm0, %xmm0
+; SKX-NEXT: vpextrw $1, %xmm0, (%rdi)
+; SKX-NEXT: retq
+ %r1 = extractelement <16 x i16> %x, i32 1
+ %r2 = extractelement <16 x i16> %x, i32 9
+ store i16 %r2, i16* %dst, align 1
+ ret i16 %r1
+}
+
+define i16 @extract_v8i16(<8 x i16> %x, i16* %dst) {
+; SKX-LABEL: extract_v8i16:
+; SKX: ## BB#0:
+; SKX-NEXT: vpextrw $1, %xmm0, %eax
+; SKX-NEXT: vpextrw $3, %xmm0, (%rdi)
+; SKX-NEXT: retq
+ %r1 = extractelement <8 x i16> %x, i32 1
+ %r2 = extractelement <8 x i16> %x, i32 3
+ store i16 %r2, i16* %dst, align 1
+ ret i16 %r1
+}
+
+define i8 @extract_v64i8(<64 x i8> %x, i8* %dst) {
+; SKX-LABEL: extract_v64i8:
+; SKX: ## BB#0:
+; SKX-NEXT: vpextrb $1, %xmm0, %eax
+; SKX-NEXT: vextracti32x4 $1, %zmm0, %xmm0
+; SKX-NEXT: vpextrb $1, %xmm0, (%rdi)
+; SKX-NEXT: retq
+ %r1 = extractelement <64 x i8> %x, i32 1
+ %r2 = extractelement <64 x i8> %x, i32 17
+ store i8 %r2, i8* %dst, align 1
+ ret i8 %r1
+}
+
+define i8 @extract_v32i8(<32 x i8> %x, i8* %dst) {
+; SKX-LABEL: extract_v32i8:
+; SKX: ## BB#0:
+; SKX-NEXT: vpextrb $1, %xmm0, %eax
+; SKX-NEXT: vextracti128 $1, %ymm0, %xmm0
+; SKX-NEXT: vpextrb $1, %xmm0, (%rdi)
+; SKX-NEXT: retq
+ %r1 = extractelement <32 x i8> %x, i32 1
+ %r2 = extractelement <32 x i8> %x, i32 17
+ store i8 %r2, i8* %dst, align 1
+ ret i8 %r1
+}
+
+define i8 @extract_v16i8(<16 x i8> %x, i8* %dst) {
+; SKX-LABEL: extract_v16i8:
+; SKX: ## BB#0:
+; SKX-NEXT: vpextrb $1, %xmm0, %eax
+; SKX-NEXT: vpextrb $3, %xmm0, (%rdi)
+; SKX-NEXT: retq
+ %r1 = extractelement <16 x i8> %x, i32 1
+ %r2 = extractelement <16 x i8> %x, i32 3
+ store i8 %r2, i8* %dst, align 1
+ ret i8 %r1
+}
+
+define <8 x i64> @insert_v8i64(<8 x i64> %x, i64 %y , i64* %ptr) {
+; SKX-LABEL: insert_v8i64:
+; SKX: ## BB#0:
+; SKX-NEXT: vpinsrq $1, (%rsi), %xmm0, %xmm1
+; SKX-NEXT: vinserti64x2 $0, %xmm1, %zmm0, %zmm0
+; SKX-NEXT: vextracti64x2 $1, %zmm0, %xmm1
+; SKX-NEXT: vpinsrq $1, %rdi, %xmm1, %xmm1
+; SKX-NEXT: vinserti64x2 $1, %xmm1, %zmm0, %zmm0
+; SKX-NEXT: retq
+ %val = load i64, i64* %ptr
+ %r1 = insertelement <8 x i64> %x, i64 %val, i32 1
+ %r2 = insertelement <8 x i64> %r1, i64 %y, i32 3
+ ret <8 x i64> %r2
+}
+
+define <4 x i64> @insert_v4i64(<4 x i64> %x, i64 %y , i64* %ptr) {
+; SKX-LABEL: insert_v4i64:
+; SKX: ## BB#0:
+; SKX-NEXT: vpinsrq $1, (%rsi), %xmm0, %xmm1
+; SKX-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; SKX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; SKX-NEXT: vpinsrq $1, %rdi, %xmm1, %xmm1
+; SKX-NEXT: vinserti64x2 $1, %xmm1, %ymm0, %ymm0
+; SKX-NEXT: retq
+ %val = load i64, i64* %ptr
+ %r1 = insertelement <4 x i64> %x, i64 %val, i32 1
+ %r2 = insertelement <4 x i64> %r1, i64 %y, i32 3
+ ret <4 x i64> %r2
+}
+
+define <2 x i64> @insert_v2i64(<2 x i64> %x, i64 %y , i64* %ptr) {
+; SKX-LABEL: insert_v2i64:
+; SKX: ## BB#0:
+; SKX-NEXT: vpinsrq $1, (%rsi), %xmm0, %xmm0
+; SKX-NEXT: vpinsrq $3, %rdi, %xmm0, %xmm0
+; SKX-NEXT: retq
+ %val = load i64, i64* %ptr
+ %r1 = insertelement <2 x i64> %x, i64 %val, i32 1
+ %r2 = insertelement <2 x i64> %r1, i64 %y, i32 3
+ ret <2 x i64> %r2
+}
+
+define <16 x i32> @insert_v16i32(<16 x i32> %x, i32 %y, i32* %ptr) {
+; SKX-LABEL: insert_v16i32:
+; SKX: ## BB#0:
+; SKX-NEXT: vpinsrd $1, (%rsi), %xmm0, %xmm1
+; SKX-NEXT: vinserti32x4 $0, %xmm1, %zmm0, %zmm0
+; SKX-NEXT: vextracti32x4 $1, %zmm0, %xmm1
+; SKX-NEXT: vpinsrd $1, %edi, %xmm1, %xmm1
+; SKX-NEXT: vinserti32x4 $1, %xmm1, %zmm0, %zmm0
+; SKX-NEXT: retq
+ %val = load i32, i32* %ptr
+ %r1 = insertelement <16 x i32> %x, i32 %val, i32 1
+ %r2 = insertelement <16 x i32> %r1, i32 %y, i32 5
+ ret <16 x i32> %r2
+}
+
+define <8 x i32> @insert_v8i32(<8 x i32> %x, i32 %y, i32* %ptr) {
+; KNL-LABEL: insert_v8i32:
+; KNL: ## BB#0:
+; KNL-NEXT: vpinsrd $1, (%rsi), %xmm0, %xmm1
+; KNL-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; KNL-NEXT: vextracti128 $1, %ymm0, %xmm1
+; KNL-NEXT: vpinsrd $1, %edi, %xmm1, %xmm1
+; KNL-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; KNL-NEXT: retq
+;
+; SKX-LABEL: insert_v8i32:
+; SKX: ## BB#0:
+; SKX-NEXT: vpinsrd $1, (%rsi), %xmm0, %xmm1
+; SKX-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; SKX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; SKX-NEXT: vpinsrd $1, %edi, %xmm1, %xmm1
+; SKX-NEXT: vinserti32x4 $1, %xmm1, %ymm0, %ymm0
+; SKX-NEXT: retq
+ %val = load i32, i32* %ptr
+ %r1 = insertelement <8 x i32> %x, i32 %val, i32 1
+ %r2 = insertelement <8 x i32> %r1, i32 %y, i32 5
+ ret <8 x i32> %r2
+}
+
+define <4 x i32> @insert_v4i32(<4 x i32> %x, i32 %y, i32* %ptr) {
+; KNL-LABEL: insert_v4i32:
+; KNL: ## BB#0:
+; KNL-NEXT: vpinsrd $1, (%rsi), %xmm0, %xmm0
+; KNL-NEXT: vpinsrd $3, %edi, %xmm0, %xmm0
+; KNL-NEXT: retq
+;
+; SKX-LABEL: insert_v4i32:
+; SKX: ## BB#0:
+; SKX-NEXT: vpinsrd $1, (%rsi), %xmm0, %xmm0
+; SKX-NEXT: vpinsrd $3, %edi, %xmm0, %xmm0
+; SKX-NEXT: retq
+ %val = load i32, i32* %ptr
+ %r1 = insertelement <4 x i32> %x, i32 %val, i32 1
+ %r2 = insertelement <4 x i32> %r1, i32 %y, i32 3
+ ret <4 x i32> %r2
+}
+
+define <32 x i16> @insert_v32i16(<32 x i16> %x, i16 %y, i16* %ptr) {
+; KNL-LABEL: insert_v32i16:
+; KNL: ## BB#0:
+; KNL-NEXT: vpinsrw $1, (%rsi), %xmm0, %xmm2
+; KNL-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; KNL-NEXT: vextracti128 $1, %ymm0, %xmm2
+; KNL-NEXT: vpinsrw $1, %edi, %xmm2, %xmm2
+; KNL-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
+; KNL-NEXT: retq
+;
+; SKX-LABEL: insert_v32i16:
+; SKX: ## BB#0:
+; SKX-NEXT: vpinsrw $1, (%rsi), %xmm0, %xmm1
+; SKX-NEXT: vinserti32x4 $0, %xmm1, %zmm0, %zmm0
+; SKX-NEXT: vextracti32x4 $1, %zmm0, %xmm1
+; SKX-NEXT: vpinsrw $1, %edi, %xmm1, %xmm1
+; SKX-NEXT: vinserti32x4 $1, %xmm1, %zmm0, %zmm0
+; SKX-NEXT: retq
+ %val = load i16, i16* %ptr
+ %r1 = insertelement <32 x i16> %x, i16 %val, i32 1
+ %r2 = insertelement <32 x i16> %r1, i16 %y, i32 9
+ ret <32 x i16> %r2
+}
+
+define <16 x i16> @insert_v16i16(<16 x i16> %x, i16 %y, i16* %ptr) {
+; KNL-LABEL: insert_v16i16:
+; KNL: ## BB#0:
+; KNL-NEXT: vpinsrw $1, (%rsi), %xmm0, %xmm1
+; KNL-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; KNL-NEXT: vextracti128 $1, %ymm0, %xmm1
+; KNL-NEXT: vpinsrw $1, %edi, %xmm1, %xmm1
+; KNL-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; KNL-NEXT: retq
+;
+; SKX-LABEL: insert_v16i16:
+; SKX: ## BB#0:
+; SKX-NEXT: vpinsrw $1, (%rsi), %xmm0, %xmm1
+; SKX-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; SKX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; SKX-NEXT: vpinsrw $1, %edi, %xmm1, %xmm1
+; SKX-NEXT: vinserti32x4 $1, %xmm1, %ymm0, %ymm0
+; SKX-NEXT: retq
+ %val = load i16, i16* %ptr
+ %r1 = insertelement <16 x i16> %x, i16 %val, i32 1
+ %r2 = insertelement <16 x i16> %r1, i16 %y, i32 9
+ ret <16 x i16> %r2
+}
+
+define <8 x i16> @insert_v8i16(<8 x i16> %x, i16 %y, i16* %ptr) {
+; KNL-LABEL: insert_v8i16:
+; KNL: ## BB#0:
+; KNL-NEXT: vpinsrw $1, (%rsi), %xmm0, %xmm0
+; KNL-NEXT: vpinsrw $5, %edi, %xmm0, %xmm0
+; KNL-NEXT: retq
+;
+; SKX-LABEL: insert_v8i16:
+; SKX: ## BB#0:
+; SKX-NEXT: vpinsrw $1, (%rsi), %xmm0, %xmm0
+; SKX-NEXT: vpinsrw $5, %edi, %xmm0, %xmm0
+; SKX-NEXT: retq
+ %val = load i16, i16* %ptr
+ %r1 = insertelement <8 x i16> %x, i16 %val, i32 1
+ %r2 = insertelement <8 x i16> %r1, i16 %y, i32 5
+ ret <8 x i16> %r2
+}
+
+define <64 x i8> @insert_v64i8(<64 x i8> %x, i8 %y, i8* %ptr) {
+; KNL-LABEL: insert_v64i8:
+; KNL: ## BB#0:
+; KNL-NEXT: vpinsrb $1, (%rsi), %xmm0, %xmm2
+; KNL-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; KNL-NEXT: vextracti128 $1, %ymm1, %xmm2
+; KNL-NEXT: vpinsrb $2, %edi, %xmm2, %xmm2
+; KNL-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
+; KNL-NEXT: retq
+;
+; SKX-LABEL: insert_v64i8:
+; SKX: ## BB#0:
+; SKX-NEXT: vpinsrb $1, (%rsi), %xmm0, %xmm1
+; SKX-NEXT: vinserti32x4 $0, %xmm1, %zmm0, %zmm0
+; SKX-NEXT: vextracti32x4 $3, %zmm0, %xmm1
+; SKX-NEXT: vpinsrb $2, %edi, %xmm1, %xmm1
+; SKX-NEXT: vinserti32x4 $3, %xmm1, %zmm0, %zmm0
+; SKX-NEXT: retq
+ %val = load i8, i8* %ptr
+ %r1 = insertelement <64 x i8> %x, i8 %val, i32 1
+ %r2 = insertelement <64 x i8> %r1, i8 %y, i32 50
+ ret <64 x i8> %r2
+}
+
+define <32 x i8> @insert_v32i8(<32 x i8> %x, i8 %y, i8* %ptr) {
+; SKX-LABEL: insert_v32i8:
+; SKX: ## BB#0:
+; SKX-NEXT: vpinsrb $1, (%rsi), %xmm0, %xmm1
+; SKX-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; SKX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; SKX-NEXT: vpinsrb $1, %edi, %xmm1, %xmm1
+; SKX-NEXT: vinserti32x4 $1, %xmm1, %ymm0, %ymm0
+; SKX-NEXT: retq
+ %val = load i8, i8* %ptr
+ %r1 = insertelement <32 x i8> %x, i8 %val, i32 1
+ %r2 = insertelement <32 x i8> %r1, i8 %y, i32 17
+ ret <32 x i8> %r2
+}
+
+define <16 x i8> @insert_v16i8(<16 x i8> %x, i8 %y, i8* %ptr) {
+; KNL-LABEL: insert_v16i8:
+; KNL: ## BB#0:
+; KNL-NEXT: vpinsrb $3, (%rsi), %xmm0, %xmm0
+; KNL-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; KNL-NEXT: retq
+;
+; SKX-LABEL: insert_v16i8:
+; SKX: ## BB#0:
+; SKX-NEXT: vpinsrb $3, (%rsi), %xmm0, %xmm0
+; SKX-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; SKX-NEXT: retq
+ %val = load i8, i8* %ptr
+ %r1 = insertelement <16 x i8> %x, i8 %val, i32 3
+ %r2 = insertelement <16 x i8> %r1, i8 %y, i32 10
+ ret <16 x i8> %r2
+}
+
define <8 x i64> @test_insert_128_v8i64(<8 x i64> %x, i64 %y) {
; KNL-LABEL: test_insert_128_v8i64:
; KNL: ## BB#0:
// CHECK: encoding: [0x62,0x61,0x2d,0x40,0x69,0xb2,0xc0,0xdf,0xff,0xff]
vpunpckhwd -8256(%rdx), %zmm26, %zmm30
+// CHECK: vpextrb $171, %xmm17, %eax
+// CHECK: encoding: [0x62,0xe3,0x7d,0x08,0x14,0xc8,0xab]
+ vpextrb $171, %xmm17, %eax
+
+// CHECK: vpextrb $123, %xmm17, %eax
+// CHECK: encoding: [0x62,0xe3,0x7d,0x08,0x14,0xc8,0x7b]
+ vpextrb $123, %xmm17, %eax
+
+// CHECK: vpextrb $123, %xmm17, %r8d
+// CHECK: encoding: [0x62,0xc3,0x7d,0x08,0x14,0xc8,0x7b]
+ vpextrb $123, %xmm17,%r8d
+
+// CHECK: vpextrb $123, %xmm17, (%rcx)
+// CHECK: encoding: [0x62,0xe3,0x7d,0x08,0x14,0x09,0x7b]
+ vpextrb $123, %xmm17, (%rcx)
+
+// CHECK: vpextrb $123, %xmm17, 291(%rax,%r14,8)
+// CHECK: encoding: [0x62,0xa3,0x7d,0x08,0x14,0x8c,0xf0,0x23,0x01,0x00,0x00,0x7b]
+ vpextrb $123, %xmm17, 291(%rax,%r14,8)
+
+// CHECK: vpextrb $123, %xmm17, 127(%rdx)
+// CHECK: encoding: [0x62,0xe3,0x7d,0x08,0x14,0x4a,0x7f,0x7b]
+ vpextrb $123, %xmm17, 127(%rdx)
+
+// CHECK: vpextrb $123, %xmm17, 128(%rdx)
+// CHECK: encoding: [0x62,0xe3,0x7d,0x08,0x14,0x8a,0x80,0x00,0x00,0x00,0x7b]
+ vpextrb $123, %xmm17, 128(%rdx)
+
+// CHECK: vpextrb $123, %xmm17, -128(%rdx)
+// CHECK: encoding: [0x62,0xe3,0x7d,0x08,0x14,0x4a,0x80,0x7b]
+ vpextrb $123, %xmm17, -128(%rdx)
+
+// CHECK: vpextrb $123, %xmm17, -129(%rdx)
+// CHECK: encoding: [0x62,0xe3,0x7d,0x08,0x14,0x8a,0x7f,0xff,0xff,0xff,0x7b]
+ vpextrb $123, %xmm17, -129(%rdx)
+// CHECK: vpinsrb $171, %eax, %xmm25, %xmm25
+// CHECK: encoding: [0x62,0x63,0x35,0x00,0x20,0xc8,0xab]
+ vpinsrb $171,%eax, %xmm25, %xmm25
+
+// CHECK: vpinsrb $123, %eax, %xmm25, %xmm25
+// CHECK: encoding: [0x62,0x63,0x35,0x00,0x20,0xc8,0x7b]
+ vpinsrb $123,%eax, %xmm25, %xmm25
+
+// CHECK: vpinsrb $123, %ebp, %xmm25, %xmm25
+// CHECK: encoding: [0x62,0x63,0x35,0x00,0x20,0xcd,0x7b]
+ vpinsrb $123,%ebp, %xmm25, %xmm25
+
+// CHECK: vpinsrb $123, %r13d, %xmm25, %xmm25
+// CHECK: encoding: [0x62,0x43,0x35,0x00,0x20,0xcd,0x7b]
+ vpinsrb $123,%r13d, %xmm25, %xmm25
+
+// CHECK: vpinsrb $123, (%rcx), %xmm25, %xmm25
+// CHECK: encoding: [0x62,0x63,0x35,0x00,0x20,0x09,0x7b]
+ vpinsrb $123, (%rcx), %xmm25, %xmm25
+
+// CHECK: vpinsrb $123, 291(%rax,%r14,8), %xmm25, %xmm25
+// CHECK: encoding: [0x62,0x23,0x35,0x00,0x20,0x8c,0xf0,0x23,0x01,0x00,0x00,0x7b]
+ vpinsrb $123, 291(%rax,%r14,8), %xmm25, %xmm25
+
+// CHECK: vpinsrb $123, 127(%rdx), %xmm25, %xmm25
+// CHECK: encoding: [0x62,0x63,0x35,0x00,0x20,0x4a,0x7f,0x7b]
+ vpinsrb $123, 127(%rdx), %xmm25, %xmm25
+
+// CHECK: vpinsrb $123, 128(%rdx), %xmm25, %xmm25
+// CHECK: encoding: [0x62,0x63,0x35,0x00,0x20,0x8a,0x80,0x00,0x00,0x00,0x7b]
+ vpinsrb $123, 128(%rdx), %xmm25, %xmm25
+
+// CHECK: vpinsrb $123, -128(%rdx), %xmm25, %xmm25
+// CHECK: encoding: [0x62,0x63,0x35,0x00,0x20,0x4a,0x80,0x7b]
+ vpinsrb $123, -128(%rdx), %xmm25, %xmm25
+
+// CHECK: vpinsrb $123, -129(%rdx), %xmm25, %xmm25
+// CHECK: encoding: [0x62,0x63,0x35,0x00,0x20,0x8a,0x7f,0xff,0xff,0xff,0x7b]
+ vpinsrb $123, -129(%rdx), %xmm25, %xmm25
+
+// CHECK: vpinsrw $171, %eax, %xmm25, %xmm18
+// CHECK: encoding: [0x62,0xe1,0x35,0x00,0xc4,0xd0,0xab]
+ vpinsrw $171,%eax, %xmm25, %xmm18
+
+// CHECK: vpinsrw $123, %eax, %xmm25, %xmm18
+// CHECK: encoding: [0x62,0xe1,0x35,0x00,0xc4,0xd0,0x7b]
+ vpinsrw $123,%eax, %xmm25, %xmm18
+
+// CHECK: vpinsrw $123, %ebp, %xmm25, %xmm18
+// CHECK: encoding: [0x62,0xe1,0x35,0x00,0xc4,0xd5,0x7b]
+ vpinsrw $123,%ebp, %xmm25, %xmm18
+
+// CHECK: vpinsrw $123, %r13d, %xmm25, %xmm18
+// CHECK: encoding: [0x62,0xc1,0x35,0x00,0xc4,0xd5,0x7b]
+ vpinsrw $123,%r13d, %xmm25, %xmm18
+
+// CHECK: vpinsrw $123, (%rcx), %xmm25, %xmm18
+// CHECK: encoding: [0x62,0xe1,0x35,0x00,0xc4,0x11,0x7b]
+ vpinsrw $123, (%rcx), %xmm25, %xmm18
+
+// CHECK: vpinsrw $123, 291(%rax,%r14,8), %xmm25, %xmm18
+// CHECK: encoding: [0x62,0xa1,0x35,0x00,0xc4,0x94,0xf0,0x23,0x01,0x00,0x00,0x7b]
+ vpinsrw $123, 291(%rax,%r14,8), %xmm25, %xmm18
+
+// CHECK: vpinsrw $123, 254(%rdx), %xmm25, %xmm18
+// CHECK: encoding: [0x62,0xe1,0x35,0x00,0xc4,0x52,0x7f,0x7b]
+ vpinsrw $123, 254(%rdx), %xmm25, %xmm18
+
+// CHECK: vpinsrw $123, 256(%rdx), %xmm25, %xmm18
+// CHECK: encoding: [0x62,0xe1,0x35,0x00,0xc4,0x92,0x00,0x01,0x00,0x00,0x7b]
+ vpinsrw $123, 256(%rdx), %xmm25, %xmm18
+
+// CHECK: vpinsrw $123, -256(%rdx), %xmm25, %xmm18
+// CHECK: encoding: [0x62,0xe1,0x35,0x00,0xc4,0x52,0x80,0x7b]
+ vpinsrw $123, -256(%rdx), %xmm25, %xmm18
+
+// CHECK: vpinsrw $123, -258(%rdx), %xmm25, %xmm18
+// CHECK: encoding: [0x62,0xe1,0x35,0x00,0xc4,0x92,0xfe,0xfe,0xff,0xff,0x7b]
+ vpinsrw $123, -258(%rdx), %xmm25, %xmm18
+
+// CHECK: vpextrw $123, %xmm28, (%rcx)
+// CHECK: encoding: [0x62,0x63,0x7d,0x08,0x15,0x21,0x7b]
+ vpextrw $123, %xmm28, (%rcx)
+
+// CHECK: vpextrw $123, %xmm28, 291(%rax,%r14,8)
+// CHECK: encoding: [0x62,0x23,0x7d,0x08,0x15,0xa4,0xf0,0x23,0x01,0x00,0x00,0x7b]
+ vpextrw $123, %xmm28, 291(%rax,%r14,8)
+
+// CHECK: vpextrw $123, %xmm28, 254(%rdx)
+// CHECK: encoding: [0x62,0x63,0x7d,0x08,0x15,0x62,0x7f,0x7b]
+ vpextrw $123, %xmm28, 254(%rdx)
+
+// CHECK: vpextrw $123, %xmm28, 256(%rdx)
+// CHECK: encoding: [0x62,0x63,0x7d,0x08,0x15,0xa2,0x00,0x01,0x00,0x00,0x7b]
+ vpextrw $123, %xmm28, 256(%rdx)
+
+// CHECK: vpextrw $123, %xmm28, -256(%rdx)
+// CHECK: encoding: [0x62,0x63,0x7d,0x08,0x15,0x62,0x80,0x7b]
+ vpextrw $123, %xmm28, -256(%rdx)
+
+// CHECK: vpextrw $123, %xmm28, -258(%rdx)
+// CHECK: encoding: [0x62,0x63,0x7d,0x08,0x15,0xa2,0xfe,0xfe,0xff,0xff,0x7b]
+ vpextrw $123, %xmm28, -258(%rdx)
+
+// CHECK: vpextrw $171, %xmm30, %eax
+// CHECK: encoding: [0x62,0x91,0x7d,0x08,0xc5,0xc6,0xab]
+ vpextrw $171, %xmm30,%rax
+
+// CHECK: vpextrw $123, %xmm30, %eax
+// CHECK: encoding: [0x62,0x91,0x7d,0x08,0xc5,0xc6,0x7b]
+ vpextrw $123, %xmm30,%rax
+
+// CHECK: vpextrw $123, %xmm30, %r8d
+// CHECK: encoding: [0x62,0x11,0x7d,0x08,0xc5,0xc6,0x7b]
+ vpextrw $123, %xmm30,%r8
+
+// CHECK: vpextrw $171, %xmm28, %eax
+// CHECK: encoding: [0x62,0x91,0x7d,0x08,0xc5,0xc4,0xab]
+ vpextrw $0xab, %xmm28, %eax
+
+// CHECK: vpextrw $123, %xmm28, %eax
+// CHECK: encoding: [0x62,0x91,0x7d,0x08,0xc5,0xc4,0x7b]
+ vpextrw $0x7b, %xmm28, %eax
+
+// CHECK: vpextrw $123, %xmm28, %r8d
+// CHECK: encoding: [0x62,0x11,0x7d,0x08,0xc5,0xc4,0x7b]
+ vpextrw $0x7b, %xmm28, %r8d
+
+// CHECK: vpextrw $171, %xmm20, %eax
+// CHECK: encoding: [0x62,0xb1,0x7d,0x08,0xc5,0xc4,0xab]
+ vpextrw $0xab, %xmm20, %eax
+
+// CHECK: vpextrw $123, %xmm20, %eax
+// CHECK: encoding: [0x62,0xb1,0x7d,0x08,0xc5,0xc4,0x7b]
+ vpextrw $0x7b, %xmm20, %eax
+
+// CHECK: vpextrw $123, %xmm20, %r8d
+// CHECK: encoding: [0x62,0x31,0x7d,0x08,0xc5,0xc4,0x7b]
+ vpextrw $0x7b, %xmm20, %r8d
+
+// CHECK: vpextrw $171, %xmm19, %eax
+// CHECK: encoding: [0x62,0xb1,0x7d,0x08,0xc5,0xc3,0xab]
+ vpextrw $0xab, %xmm19, %eax
+
+// CHECK: vpextrw $123, %xmm19, %eax
+// CHECK: encoding: [0x62,0xb1,0x7d,0x08,0xc5,0xc3,0x7b]
+ vpextrw $0x7b, %xmm19, %eax
+
+// CHECK: vpextrw $123, %xmm19, %r8d
+// CHECK: encoding: [0x62,0x31,0x7d,0x08,0xc5,0xc3,0x7b]
+ vpextrw $0x7b, %xmm19, %r8d
+
// CHECK: kunpckdq %k4, %k6, %k4
// CHECK: encoding: [0xc4,0xe1,0xcc,0x4b,0xe4]
kunpckdq %k4, %k6, %k4
// CHECK: encoding: [0x62,0xa1,0xff,0xca,0x7a,0xd5]
vcvtuqq2ps %zmm21, %ymm18 {%k2} {z}
+// CHECK: vpextrd $171, %xmm28, %eax
+// CHECK: encoding: [0x62,0x63,0x7d,0x08,0x16,0xe0,0xab]
+ vpextrd $0xab, %xmm28, %eax
+
+// CHECK: vpextrd $123, %xmm28, %eax
+// CHECK: encoding: [0x62,0x63,0x7d,0x08,0x16,0xe0,0x7b]
+ vpextrd $0x7b, %xmm28, %eax
+
+// CHECK: vpextrd $123, %xmm28, %ebp
+// CHECK: encoding: [0x62,0x63,0x7d,0x08,0x16,0xe5,0x7b]
+ vpextrd $0x7b, %xmm28, %ebp
+
+// CHECK: vpextrd $123, %xmm28, %r13d
+// CHECK: encoding: [0x62,0x43,0x7d,0x08,0x16,0xe5,0x7b]
+ vpextrd $0x7b, %xmm28, %r13d
+
+// CHECK: vpextrd $123, %xmm28, (%rcx)
+// CHECK: encoding: [0x62,0x63,0x7d,0x08,0x16,0x21,0x7b]
+ vpextrd $0x7b, %xmm28, (%rcx)
+
+// CHECK: vpextrd $123, %xmm28, 291(%rax,%r14,8)
+// CHECK: encoding: [0x62,0x23,0x7d,0x08,0x16,0xa4,0xf0,0x23,0x01,0x00,0x00,0x7b]
+ vpextrd $0x7b, %xmm28, 291(%rax,%r14,8)
+
+// CHECK: vpextrd $123, %xmm28, 508(%rdx)
+// CHECK: encoding: [0x62,0x63,0x7d,0x08,0x16,0x62,0x7f,0x7b]
+ vpextrd $0x7b, %xmm28, 508(%rdx)
+
+// CHECK: vpextrd $123, %xmm28, 512(%rdx)
+// CHECK: encoding: [0x62,0x63,0x7d,0x08,0x16,0xa2,0x00,0x02,0x00,0x00,0x7b]
+ vpextrd $0x7b, %xmm28, 512(%rdx)
+
+// CHECK: vpextrd $123, %xmm28, -512(%rdx)
+// CHECK: encoding: [0x62,0x63,0x7d,0x08,0x16,0x62,0x80,0x7b]
+ vpextrd $0x7b, %xmm28, -512(%rdx)
+
+// CHECK: vpextrd $123, %xmm28, -516(%rdx)
+// CHECK: encoding: [0x62,0x63,0x7d,0x08,0x16,0xa2,0xfc,0xfd,0xff,0xff,0x7b]
+ vpextrd $0x7b, %xmm28, -516(%rdx)
+
+// CHECK: vpextrd $171, %xmm20, %eax
+// CHECK: encoding: [0x62,0xe3,0x7d,0x08,0x16,0xe0,0xab]
+ vpextrd $0xab, %xmm20, %eax
+
+// CHECK: vpextrd $123, %xmm20, %eax
+// CHECK: encoding: [0x62,0xe3,0x7d,0x08,0x16,0xe0,0x7b]
+ vpextrd $0x7b, %xmm20, %eax
+
+// CHECK: vpextrd $123, %xmm20, %ebp
+// CHECK: encoding: [0x62,0xe3,0x7d,0x08,0x16,0xe5,0x7b]
+ vpextrd $0x7b, %xmm20, %ebp
+
+// CHECK: vpextrd $123, %xmm20, %r13d
+// CHECK: encoding: [0x62,0xc3,0x7d,0x08,0x16,0xe5,0x7b]
+ vpextrd $0x7b, %xmm20, %r13d
+
+// CHECK: vpextrd $123, %xmm20, (%rcx)
+// CHECK: encoding: [0x62,0xe3,0x7d,0x08,0x16,0x21,0x7b]
+ vpextrd $0x7b, %xmm20, (%rcx)
+
+// CHECK: vpextrd $123, %xmm20, 4660(%rax,%r14,8)
+// CHECK: encoding: [0x62,0xa3,0x7d,0x08,0x16,0xa4,0xf0,0x34,0x12,0x00,0x00,0x7b]
+ vpextrd $0x7b, %xmm20, 4660(%rax,%r14,8)
+
+// CHECK: vpextrd $123, %xmm20, 508(%rdx)
+// CHECK: encoding: [0x62,0xe3,0x7d,0x08,0x16,0x62,0x7f,0x7b]
+ vpextrd $0x7b, %xmm20, 508(%rdx)
+
+// CHECK: vpextrd $123, %xmm20, 512(%rdx)
+// CHECK: encoding: [0x62,0xe3,0x7d,0x08,0x16,0xa2,0x00,0x02,0x00,0x00,0x7b]
+ vpextrd $0x7b, %xmm20, 512(%rdx)
+
+// CHECK: vpextrd $123, %xmm20, -512(%rdx)
+// CHECK: encoding: [0x62,0xe3,0x7d,0x08,0x16,0x62,0x80,0x7b]
+ vpextrd $0x7b, %xmm20, -512(%rdx)
+
+// CHECK: vpextrd $123, %xmm20, -516(%rdx)
+// CHECK: encoding: [0x62,0xe3,0x7d,0x08,0x16,0xa2,0xfc,0xfd,0xff,0xff,0x7b]
+ vpextrd $0x7b, %xmm20, -516(%rdx)
+
+// CHECK: vpextrq $171, %xmm24, %rax
+// CHECK: encoding: [0x62,0x63,0xfd,0x08,0x16,0xc0,0xab]
+ vpextrq $0xab, %xmm24, %rax
+
+// CHECK: vpextrq $123, %xmm24, %rax
+// CHECK: encoding: [0x62,0x63,0xfd,0x08,0x16,0xc0,0x7b]
+ vpextrq $0x7b, %xmm24, %rax
+
+// CHECK: vpextrq $123, %xmm24, %r8
+// CHECK: encoding: [0x62,0x43,0xfd,0x08,0x16,0xc0,0x7b]
+ vpextrq $0x7b, %xmm24, %r8
+
+// CHECK: vpextrq $123, %xmm24, (%rcx)
+// CHECK: encoding: [0x62,0x63,0xfd,0x08,0x16,0x01,0x7b]
+ vpextrq $0x7b, %xmm24, (%rcx)
+
+// CHECK: vpextrq $123, %xmm24, 291(%rax,%r14,8)
+// CHECK: encoding: [0x62,0x23,0xfd,0x08,0x16,0x84,0xf0,0x23,0x01,0x00,0x00,0x7b]
+ vpextrq $0x7b, %xmm24, 291(%rax,%r14,8)
+
+// CHECK: vpextrq $123, %xmm24, 1016(%rdx)
+// CHECK: encoding: [0x62,0x63,0xfd,0x08,0x16,0x42,0x7f,0x7b]
+ vpextrq $0x7b, %xmm24, 1016(%rdx)
+
+// CHECK: vpextrq $123, %xmm24, 1024(%rdx)
+// CHECK: encoding: [0x62,0x63,0xfd,0x08,0x16,0x82,0x00,0x04,0x00,0x00,0x7b]
+ vpextrq $0x7b, %xmm24, 1024(%rdx)
+
+// CHECK: vpextrq $123, %xmm24, -1024(%rdx)
+// CHECK: encoding: [0x62,0x63,0xfd,0x08,0x16,0x42,0x80,0x7b]
+ vpextrq $0x7b, %xmm24, -1024(%rdx)
+
+// CHECK: vpextrq $123, %xmm24, -1032(%rdx)
+// CHECK: encoding: [0x62,0x63,0xfd,0x08,0x16,0x82,0xf8,0xfb,0xff,0xff,0x7b]
+ vpextrq $0x7b, %xmm24, -1032(%rdx)
+
+// CHECK: vpextrq $171, %xmm20, %rax
+// CHECK: encoding: [0x62,0xe3,0xfd,0x08,0x16,0xe0,0xab]
+ vpextrq $0xab, %xmm20, %rax
+
+// CHECK: vpextrq $123, %xmm20, %rax
+// CHECK: encoding: [0x62,0xe3,0xfd,0x08,0x16,0xe0,0x7b]
+ vpextrq $0x7b, %xmm20, %rax
+
+// CHECK: vpextrq $123, %xmm20, %r8
+// CHECK: encoding: [0x62,0xc3,0xfd,0x08,0x16,0xe0,0x7b]
+ vpextrq $0x7b, %xmm20, %r8
+
+// CHECK: vpextrq $123, %xmm20, (%rcx)
+// CHECK: encoding: [0x62,0xe3,0xfd,0x08,0x16,0x21,0x7b]
+ vpextrq $0x7b, %xmm20, (%rcx)
+
+// CHECK: vpextrq $123, %xmm20, 4660(%rax,%r14,8)
+// CHECK: encoding: [0x62,0xa3,0xfd,0x08,0x16,0xa4,0xf0,0x34,0x12,0x00,0x00,0x7b]
+ vpextrq $0x7b, %xmm20, 4660(%rax,%r14,8)
+
+// CHECK: vpextrq $123, %xmm20, 1016(%rdx)
+// CHECK: encoding: [0x62,0xe3,0xfd,0x08,0x16,0x62,0x7f,0x7b]
+ vpextrq $0x7b, %xmm20, 1016(%rdx)
+
+// CHECK: vpextrq $123, %xmm20, 1024(%rdx)
+// CHECK: encoding: [0x62,0xe3,0xfd,0x08,0x16,0xa2,0x00,0x04,0x00,0x00,0x7b]
+ vpextrq $0x7b, %xmm20, 1024(%rdx)
+
+// CHECK: vpextrq $123, %xmm20, -1024(%rdx)
+// CHECK: encoding: [0x62,0xe3,0xfd,0x08,0x16,0x62,0x80,0x7b]
+ vpextrq $0x7b, %xmm20, -1024(%rdx)
+
+// CHECK: vpextrq $123, %xmm20, -1032(%rdx)
+// CHECK: encoding: [0x62,0xe3,0xfd,0x08,0x16,0xa2,0xf8,0xfb,0xff,0xff,0x7b]
+ vpextrq $0x7b, %xmm20, -1032(%rdx)
+
+// CHECK: vpinsrd $171, %eax, %xmm25, %xmm23
+// CHECK: encoding: [0x62,0xe3,0x35,0x00,0x22,0xf8,0xab]
+ vpinsrd $0xab,%eax, %xmm25, %xmm23
+
+// CHECK: vpinsrd $123, %eax, %xmm25, %xmm23
+// CHECK: encoding: [0x62,0xe3,0x35,0x00,0x22,0xf8,0x7b]
+ vpinsrd $0x7b,%eax, %xmm25, %xmm23
+
+// CHECK: vpinsrd $123, %ebp, %xmm25, %xmm23
+// CHECK: encoding: [0x62,0xe3,0x35,0x00,0x22,0xfd,0x7b]
+ vpinsrd $0x7b,%ebp, %xmm25, %xmm23
+
+// CHECK: vpinsrd $123, %r13d, %xmm25, %xmm23
+// CHECK: encoding: [0x62,0xc3,0x35,0x00,0x22,0xfd,0x7b]
+ vpinsrd $0x7b,%r13d, %xmm25, %xmm23
+
+// CHECK: vpinsrd $123, (%rcx), %xmm25, %xmm23
+// CHECK: encoding: [0x62,0xe3,0x35,0x00,0x22,0x39,0x7b]
+ vpinsrd $0x7b,(%rcx), %xmm25, %xmm23
+
+// CHECK: vpinsrd $123, 291(%rax,%r14,8), %xmm25, %xmm23
+// CHECK: encoding: [0x62,0xa3,0x35,0x00,0x22,0xbc,0xf0,0x23,0x01,0x00,0x00,0x7b]
+ vpinsrd $0x7b,291(%rax,%r14,8), %xmm25, %xmm23
+
+// CHECK: vpinsrd $123, 508(%rdx), %xmm25, %xmm23
+// CHECK: encoding: [0x62,0xe3,0x35,0x00,0x22,0x7a,0x7f,0x7b]
+ vpinsrd $0x7b,508(%rdx), %xmm25, %xmm23
+
+// CHECK: vpinsrd $123, 512(%rdx), %xmm25, %xmm23
+// CHECK: encoding: [0x62,0xe3,0x35,0x00,0x22,0xba,0x00,0x02,0x00,0x00,0x7b]
+ vpinsrd $0x7b,512(%rdx), %xmm25, %xmm23
+
+// CHECK: vpinsrd $123, -512(%rdx), %xmm25, %xmm23
+// CHECK: encoding: [0x62,0xe3,0x35,0x00,0x22,0x7a,0x80,0x7b]
+ vpinsrd $0x7b,-512(%rdx), %xmm25, %xmm23
+
+// CHECK: vpinsrd $123, -516(%rdx), %xmm25, %xmm23
+// CHECK: encoding: [0x62,0xe3,0x35,0x00,0x22,0xba,0xfc,0xfd,0xff,0xff,0x7b]
+ vpinsrd $0x7b,-516(%rdx), %xmm25, %xmm23
+
+// CHECK: vpinsrd $171, %eax, %xmm29, %xmm22
+// CHECK: encoding: [0x62,0xe3,0x15,0x00,0x22,0xf0,0xab]
+ vpinsrd $0xab,%eax, %xmm29, %xmm22
+
+// CHECK: vpinsrd $123, %eax, %xmm29, %xmm22
+// CHECK: encoding: [0x62,0xe3,0x15,0x00,0x22,0xf0,0x7b]
+ vpinsrd $0x7b,%eax, %xmm29, %xmm22
+
+// CHECK: vpinsrd $123, %ebp, %xmm29, %xmm22
+// CHECK: encoding: [0x62,0xe3,0x15,0x00,0x22,0xf5,0x7b]
+ vpinsrd $0x7b,%ebp, %xmm29, %xmm22
+
+// CHECK: vpinsrd $123, %r13d, %xmm29, %xmm22
+// CHECK: encoding: [0x62,0xc3,0x15,0x00,0x22,0xf5,0x7b]
+ vpinsrd $0x7b,%r13d, %xmm29, %xmm22
+
+// CHECK: vpinsrd $123, (%rcx), %xmm29, %xmm22
+// CHECK: encoding: [0x62,0xe3,0x15,0x00,0x22,0x31,0x7b]
+ vpinsrd $0x7b,(%rcx), %xmm29, %xmm22
+
+// CHECK: vpinsrd $123, 4660(%rax,%r14,8), %xmm29, %xmm22
+// CHECK: encoding: [0x62,0xa3,0x15,0x00,0x22,0xb4,0xf0,0x34,0x12,0x00,0x00,0x7b]
+ vpinsrd $0x7b,4660(%rax,%r14,8), %xmm29, %xmm22
+
+// CHECK: vpinsrd $123, 508(%rdx), %xmm29, %xmm22
+// CHECK: encoding: [0x62,0xe3,0x15,0x00,0x22,0x72,0x7f,0x7b]
+ vpinsrd $0x7b,508(%rdx), %xmm29, %xmm22
+
+// CHECK: vpinsrd $123, 512(%rdx), %xmm29, %xmm22
+// CHECK: encoding: [0x62,0xe3,0x15,0x00,0x22,0xb2,0x00,0x02,0x00,0x00,0x7b]
+ vpinsrd $0x7b,512(%rdx), %xmm29, %xmm22
+
+// CHECK: vpinsrd $123, -512(%rdx), %xmm29, %xmm22
+// CHECK: encoding: [0x62,0xe3,0x15,0x00,0x22,0x72,0x80,0x7b]
+ vpinsrd $0x7b,-512(%rdx), %xmm29, %xmm22
+
+// CHECK: vpinsrd $123, -516(%rdx), %xmm29, %xmm22
+// CHECK: encoding: [0x62,0xe3,0x15,0x00,0x22,0xb2,0xfc,0xfd,0xff,0xff,0x7b]
+ vpinsrd $0x7b,-516(%rdx), %xmm29, %xmm22
+
+// CHECK: vpinsrq $171, %rax, %xmm20, %xmm22
+// CHECK: encoding: [0x62,0xe3,0xdd,0x00,0x22,0xf0,0xab]
+ vpinsrq $0xab,%rax, %xmm20, %xmm22
+
+// CHECK: vpinsrq $123, %rax, %xmm20, %xmm22
+// CHECK: encoding: [0x62,0xe3,0xdd,0x00,0x22,0xf0,0x7b]
+ vpinsrq $0x7b,%rax, %xmm20, %xmm22
+
+// CHECK: vpinsrq $123, %r8, %xmm20, %xmm22
+// CHECK: encoding: [0x62,0xc3,0xdd,0x00,0x22,0xf0,0x7b]
+ vpinsrq $0x7b,%r8, %xmm20, %xmm22
+
+// CHECK: vpinsrq $123, (%rcx), %xmm20, %xmm22
+// CHECK: encoding: [0x62,0xe3,0xdd,0x00,0x22,0x31,0x7b]
+ vpinsrq $0x7b,(%rcx), %xmm20, %xmm22
+
+// CHECK: vpinsrq $123, 291(%rax,%r14,8), %xmm20, %xmm22
+// CHECK: encoding: [0x62,0xa3,0xdd,0x00,0x22,0xb4,0xf0,0x23,0x01,0x00,0x00,0x7b]
+ vpinsrq $0x7b,291(%rax,%r14,8), %xmm20, %xmm22
+
+// CHECK: vpinsrq $123, 1016(%rdx), %xmm20, %xmm22
+// CHECK: encoding: [0x62,0xe3,0xdd,0x00,0x22,0x72,0x7f,0x7b]
+ vpinsrq $0x7b,1016(%rdx), %xmm20, %xmm22
+
+// CHECK: vpinsrq $123, 1024(%rdx), %xmm20, %xmm22
+// CHECK: encoding: [0x62,0xe3,0xdd,0x00,0x22,0xb2,0x00,0x04,0x00,0x00,0x7b]
+ vpinsrq $0x7b,1024(%rdx), %xmm20, %xmm22
+
+// CHECK: vpinsrq $123, -1024(%rdx), %xmm20, %xmm22
+// CHECK: encoding: [0x62,0xe3,0xdd,0x00,0x22,0x72,0x80,0x7b]
+ vpinsrq $0x7b,-1024(%rdx), %xmm20, %xmm22
+
+// CHECK: vpinsrq $123, -1032(%rdx), %xmm20, %xmm22
+// CHECK: encoding: [0x62,0xe3,0xdd,0x00,0x22,0xb2,0xf8,0xfb,0xff,0xff,0x7b]
+ vpinsrq $0x7b,-1032(%rdx), %xmm20, %xmm22
+
+// CHECK: vpinsrq $171, %rax, %xmm19, %xmm25
+// CHECK: encoding: [0x62,0x63,0xe5,0x00,0x22,0xc8,0xab]
+ vpinsrq $0xab,%rax, %xmm19, %xmm25
+
+// CHECK: vpinsrq $123, %rax, %xmm19, %xmm25
+// CHECK: encoding: [0x62,0x63,0xe5,0x00,0x22,0xc8,0x7b]
+ vpinsrq $0x7b,%rax, %xmm19, %xmm25
+
+// CHECK: vpinsrq $123, %r8, %xmm19, %xmm25
+// CHECK: encoding: [0x62,0x43,0xe5,0x00,0x22,0xc8,0x7b]
+ vpinsrq $0x7b,%r8, %xmm19, %xmm25
+
+// CHECK: vpinsrq $123, (%rcx), %xmm19, %xmm25
+// CHECK: encoding: [0x62,0x63,0xe5,0x00,0x22,0x09,0x7b]
+ vpinsrq $0x7b,(%rcx), %xmm19, %xmm25
+
+// CHECK: vpinsrq $123, 4660(%rax,%r14,8), %xmm19, %xmm25
+// CHECK: encoding: [0x62,0x23,0xe5,0x00,0x22,0x8c,0xf0,0x34,0x12,0x00,0x00,0x7b]
+ vpinsrq $0x7b,4660(%rax,%r14,8), %xmm19, %xmm25
+
+// CHECK: vpinsrq $123, 1016(%rdx), %xmm19, %xmm25
+// CHECK: encoding: [0x62,0x63,0xe5,0x00,0x22,0x4a,0x7f,0x7b]
+ vpinsrq $0x7b,1016(%rdx), %xmm19, %xmm25
+
+// CHECK: vpinsrq $123, 1024(%rdx), %xmm19, %xmm25
+// CHECK: encoding: [0x62,0x63,0xe5,0x00,0x22,0x8a,0x00,0x04,0x00,0x00,0x7b]
+ vpinsrq $0x7b,1024(%rdx), %xmm19, %xmm25
+
+// CHECK: vpinsrq $123, -1024(%rdx), %xmm19, %xmm25
+// CHECK: encoding: [0x62,0x63,0xe5,0x00,0x22,0x4a,0x80,0x7b]
+ vpinsrq $0x7b,-1024(%rdx), %xmm19, %xmm25
+
+// CHECK: vpinsrq $123, -1032(%rdx), %xmm19, %xmm25
+// CHECK: encoding: [0x62,0x63,0xe5,0x00,0x22,0x8a,0xf8,0xfb,0xff,0xff,0x7b]
+ vpinsrq $0x7b,-1032(%rdx), %xmm19, %xmm25
+
// CHECK: vinsertf32x8 $171, %ymm24, %zmm17, %zmm29
// CHECK: encoding: [0x62,0x03,0x75,0x40,0x1a,0xe8,0xab]
vinsertf32x8 $0xab, %ymm24, %zmm17, %zmm29