for (auto VT : { MVT::v64i8, MVT::v32i16 }) {
setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
setOperationAction(ISD::VSELECT, VT, Legal);
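+ // The new 512-bit vXi8/vXi16 shift cases are handled by the custom
+ // lowering changes below (vpsllvw/vpsravw/vpsrlvw patterns for words and
+ // the word-shift-plus-mask trick for bytes).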
+ setOperationAction(ISD::SRL, VT, Custom);
+ setOperationAction(ISD::SHL, VT, Custom);
+ setOperationAction(ISD::SRA, VT, Custom);
setOperationAction(ISD::AND, VT, Promote);
AddPromotedToType (ISD::AND, VT, MVT::v8i64);
Op.getOpcode() == ISD::SRA && !Subtarget->hasXOP())
return ArithmeticShiftRight64(ShiftAmt);
- if (VT == MVT::v16i8 || (Subtarget->hasInt256() && VT == MVT::v32i8)) {
+ if (VT == MVT::v16i8 ||
+ (Subtarget->hasInt256() && VT == MVT::v32i8) ||
+ VT == MVT::v64i8) {
unsigned NumElts = VT.getVectorNumElements();
MVT ShiftVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
R, ShiftAmt, DAG);
SHL = DAG.getBitcast(VT, SHL);
// Zero out the rightmost bits.
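+ // e.g. ShiftAmt == 3 gives a per-byte mask of 0xF8, clearing the bits
+ // shifted in from the adjacent byte of the wide i16 shift.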
- SmallVector<SDValue, 32> V(
- NumElts, DAG.getConstant(uint8_t(-1U << ShiftAmt), dl, MVT::i8));
return DAG.getNode(ISD::AND, dl, VT, SHL,
- DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
+ DAG.getConstant(uint8_t(-1U << ShiftAmt), dl, VT));
}
if (Op.getOpcode() == ISD::SRL) {
// Make a large shift.
R, ShiftAmt, DAG);
SRL = DAG.getBitcast(VT, SRL);
// Zero out the leftmost bits.
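+ // e.g. ShiftAmt == 3 gives a per-byte mask of 0x1F, clearing the bits
+ // shifted in from the adjacent byte of the wide i16 shift.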
- SmallVector<SDValue, 32> V(
- NumElts, DAG.getConstant(uint8_t(-1U) >> ShiftAmt, dl, MVT::i8));
return DAG.getNode(ISD::AND, dl, VT, SRL,
- DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
+ DAG.getConstant(uint8_t(-1U) >> ShiftAmt, dl, VT));
}
if (Op.getOpcode() == ISD::SRA) {
// ashr(R, Amt) === sub(xor(lshr(R, Amt), Mask), Mask)
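+ // Worked i8 example: ashr(0xF0, 2): lshr gives 0x3C, Mask = 0x80 >> 2 = 0x20,
+ // xor gives 0x1C, and 0x1C - 0x20 = 0xFC, i.e. ashr(-16, 2) == -4.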
SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
- SmallVector<SDValue, 32> V(NumElts,
- DAG.getConstant(128 >> ShiftAmt, dl,
- MVT::i8));
- SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V);
+
+ SDValue Mask = DAG.getConstant(128 >> ShiftAmt, dl, VT);
Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
return Res;
avx512vl_i64_info>, VEX_W;
}
+// Use the 512-bit version to implement 128/256-bit shifts when VLX is not
+// available (NoVLX).
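+// For example, a v8i16 variable shift is performed by inserting the operands
+// into undef zmm registers (INSERT_SUBREG of IMPLICIT_DEF), running the
+// 512-bit vpsllvw/vpsravw/vpsrlvw, and extracting the low xmm subregister.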
+multiclass avx512_var_shift_w_lowering<AVX512VLVectorVTInfo _, SDNode OpNode> {
+ let Predicates = [HasBWI, NoVLX] in {
+ def : Pat<(_.info256.VT (OpNode (_.info256.VT _.info256.RC:$src1),
+ (_.info256.VT _.info256.RC:$src2))),
+ (EXTRACT_SUBREG
+ (!cast<Instruction>(NAME#"WZrr")
+ (INSERT_SUBREG (_.info512.VT (IMPLICIT_DEF)), VR256X:$src1, sub_ymm),
+ (INSERT_SUBREG (_.info512.VT (IMPLICIT_DEF)), VR256X:$src2, sub_ymm)),
+ sub_ymm)>;
+
+ def : Pat<(_.info128.VT (OpNode (_.info128.VT _.info128.RC:$src1),
+ (_.info128.VT _.info128.RC:$src2))),
+ (EXTRACT_SUBREG
+ (!cast<Instruction>(NAME#"WZrr")
+ (INSERT_SUBREG (_.info512.VT (IMPLICIT_DEF)), VR128X:$src1, sub_xmm),
+ (INSERT_SUBREG (_.info512.VT (IMPLICIT_DEF)), VR128X:$src2, sub_xmm)),
+ sub_xmm)>;
+ }
+}
+
multiclass avx512_var_shift_w<bits<8> opc, string OpcodeStr,
SDNode OpNode> {
let Predicates = [HasBWI] in
}
defm VPSLLV : avx512_var_shift_types<0x47, "vpsllv", shl>,
- avx512_var_shift_w<0x12, "vpsllvw", shl>;
+ avx512_var_shift_w<0x12, "vpsllvw", shl>,
+ avx512_var_shift_w_lowering<avx512vl_i16_info, shl>;
defm VPSRAV : avx512_var_shift_types<0x46, "vpsrav", sra>,
- avx512_var_shift_w<0x11, "vpsravw", sra>;
+ avx512_var_shift_w<0x11, "vpsravw", sra>,
+ avx512_var_shift_w_lowering<avx512vl_i16_info, sra>;
defm VPSRLV : avx512_var_shift_types<0x45, "vpsrlv", srl>,
- avx512_var_shift_w<0x10, "vpsrlvw", srl>;
+ avx512_var_shift_w<0x10, "vpsrlvw", srl>,
+ avx512_var_shift_w_lowering<avx512vl_i16_info, srl>;
defm VPRORV : avx512_var_shift_types<0x14, "vprorv", rotr>;
defm VPROLV : avx512_var_shift_types<0x15, "vprolv", rotl>;
//===---------------------------------------------------------------------===//
let Predicates = [HasAVX, NoVLX] in {
-defm VPSLLW : PDI_binop_rmi<0xF1, 0x71, MRM6r, "vpsllw", X86vshl, X86vshli,
- VR128, v8i16, v8i16, bc_v8i16, loadv2i64,
- SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
defm VPSLLD : PDI_binop_rmi<0xF2, 0x72, MRM6r, "vpslld", X86vshl, X86vshli,
VR128, v4i32, v4i32, bc_v4i32, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
VR128, v2i64, v2i64, bc_v2i64, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
-defm VPSRLW : PDI_binop_rmi<0xD1, 0x71, MRM2r, "vpsrlw", X86vsrl, X86vsrli,
- VR128, v8i16, v8i16, bc_v8i16, loadv2i64,
- SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
defm VPSRLD : PDI_binop_rmi<0xD2, 0x72, MRM2r, "vpsrld", X86vsrl, X86vsrli,
VR128, v4i32, v4i32, bc_v4i32, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
VR128, v2i64, v2i64, bc_v2i64, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
-defm VPSRAW : PDI_binop_rmi<0xE1, 0x71, MRM4r, "vpsraw", X86vsra, X86vsrai,
- VR128, v8i16, v8i16, bc_v8i16, loadv2i64,
- SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
defm VPSRAD : PDI_binop_rmi<0xE2, 0x72, MRM4r, "vpsrad", X86vsra, X86vsrai,
VR128, v4i32, v4i32, bc_v4i32, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
-} // Predicates = [HasAVX]
+} // Predicates = [HasAVX, NoVLX]
+
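+// The 16-bit shifts stay on the VEX encodings unless both BWI and VLX are
+// available, in which case the EVEX AVX512BW-VL forms are preferred.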
+let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
+defm VPSLLW : PDI_binop_rmi<0xF1, 0x71, MRM6r, "vpsllw", X86vshl, X86vshli,
+ VR128, v8i16, v8i16, bc_v8i16, loadv2i64,
+ SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
+defm VPSRLW : PDI_binop_rmi<0xD1, 0x71, MRM2r, "vpsrlw", X86vsrl, X86vsrli,
+ VR128, v8i16, v8i16, bc_v8i16, loadv2i64,
+ SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
+defm VPSRAW : PDI_binop_rmi<0xE1, 0x71, MRM4r, "vpsraw", X86vsra, X86vsrai,
+ VR128, v8i16, v8i16, bc_v8i16, loadv2i64,
+ SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
+} // Predicates = [HasAVX, NoVLX_Or_NoBWI]
+
let ExeDomain = SSEPackedInt, SchedRW = [WriteVecShift],
    Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
} // Predicates = [HasAVX, NoVLX_Or_NoBWI]
let Predicates = [HasAVX2, NoVLX] in {
-defm VPSLLWY : PDI_binop_rmi<0xF1, 0x71, MRM6r, "vpsllw", X86vshl, X86vshli,
- VR256, v16i16, v8i16, bc_v8i16, loadv2i64,
- SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
defm VPSLLDY : PDI_binop_rmi<0xF2, 0x72, MRM6r, "vpslld", X86vshl, X86vshli,
VR256, v8i32, v4i32, bc_v4i32, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
VR256, v4i64, v2i64, bc_v2i64, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
-defm VPSRLWY : PDI_binop_rmi<0xD1, 0x71, MRM2r, "vpsrlw", X86vsrl, X86vsrli,
- VR256, v16i16, v8i16, bc_v8i16, loadv2i64,
- SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
defm VPSRLDY : PDI_binop_rmi<0xD2, 0x72, MRM2r, "vpsrld", X86vsrl, X86vsrli,
VR256, v8i32, v4i32, bc_v4i32, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
VR256, v4i64, v2i64, bc_v2i64, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
-defm VPSRAWY : PDI_binop_rmi<0xE1, 0x71, MRM4r, "vpsraw", X86vsra, X86vsrai,
- VR256, v16i16, v8i16, bc_v8i16, loadv2i64,
- SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
defm VPSRADY : PDI_binop_rmi<0xE2, 0x72, MRM4r, "vpsrad", X86vsra, X86vsrai,
VR256, v8i32, v4i32, bc_v4i32, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
-}// Predicates = [HasAVX2]
+} // Predicates = [HasAVX2, NoVLX]
+
+let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
+defm VPSLLWY : PDI_binop_rmi<0xF1, 0x71, MRM6r, "vpsllw", X86vshl, X86vshli,
+ VR256, v16i16, v8i16, bc_v8i16, loadv2i64,
+ SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
+defm VPSRLWY : PDI_binop_rmi<0xD1, 0x71, MRM2r, "vpsrlw", X86vsrl, X86vsrli,
+ VR256, v16i16, v8i16, bc_v8i16, loadv2i64,
+ SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
+defm VPSRAWY : PDI_binop_rmi<0xE1, 0x71, MRM4r, "vpsraw", X86vsra, X86vsrai,
+ VR256, v16i16, v8i16, bc_v8i16, loadv2i64,
+ SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
+} // Predicates = [HasAVX2, NoVLX_Or_NoBWI]
let ExeDomain = SSEPackedInt, SchedRW = [WriteVecShift], hasSideEffects = 0,
Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
VEX_4V, VEX_L, Sched<[WriteVarVecShiftLd, ReadAfterLd]>;
}
-defm VPSLLVD : avx2_var_shift<0x47, "vpsllvd", shl, v4i32, v8i32>;
-defm VPSLLVQ : avx2_var_shift<0x47, "vpsllvq", shl, v2i64, v4i64>, VEX_W;
-defm VPSRLVD : avx2_var_shift<0x45, "vpsrlvd", srl, v4i32, v8i32>;
-defm VPSRLVQ : avx2_var_shift<0x45, "vpsrlvq", srl, v2i64, v4i64>, VEX_W;
-defm VPSRAVD : avx2_var_shift<0x46, "vpsravd", sra, v4i32, v8i32>;
-
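+// When VLX is available the EVEX variable-shift forms handle these types,
+// so restrict the VEX patterns to NoVLX.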
+let Predicates = [HasAVX2, NoVLX] in {
+ defm VPSLLVD : avx2_var_shift<0x47, "vpsllvd", shl, v4i32, v8i32>;
+ defm VPSLLVQ : avx2_var_shift<0x47, "vpsllvq", shl, v2i64, v4i64>, VEX_W;
+ defm VPSRLVD : avx2_var_shift<0x45, "vpsrlvd", srl, v4i32, v8i32>;
+ defm VPSRLVQ : avx2_var_shift<0x45, "vpsrlvq", srl, v2i64, v4i64>, VEX_W;
+ defm VPSRAVD : avx2_var_shift<0x46, "vpsravd", sra, v4i32, v8i32>;
+}
//===----------------------------------------------------------------------===//
// VGATHER - GATHER Operations
multiclass avx2_gather<bits<8> opc, string OpcodeStr, RegisterClass RC256,
ret <4 x double> %shuffle
}
+define <8 x i32> @ashr_v8i32(<8 x i32> %a, <8 x i32> %b) {
+ %shift = ashr <8 x i32> %a, %b
+ ret <8 x i32> %shift
+}
+
+define <8 x i32> @lshr_v8i32(<8 x i32> %a, <8 x i32> %b) {
+ %shift = lshr <8 x i32> %a, %b
+ ret <8 x i32> %shift
+}
+
+define <8 x i32> @shl_v8i32(<8 x i32> %a, <8 x i32> %b) {
+ %shift = shl <8 x i32> %a, %b
+ ret <8 x i32> %shift
+}
+
+define <8 x i32> @ashr_const_v8i32(<8 x i32> %a) {
+ %shift = ashr <8 x i32> %a, <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
+ ret <8 x i32> %shift
+}
+
+define <8 x i32> @lshr_const_v8i32(<8 x i32> %a) {
+ %shift = lshr <8 x i32> %a, <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
+ ret <8 x i32> %shift
+}
+
+define <8 x i32> @shl_const_v8i32(<8 x i32> %a) {
+ %shift = shl <8 x i32> %a, <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
+ ret <8 x i32> %shift
+}
+
+define <4 x i64> @ashr_v4i64(<4 x i64> %a, <4 x i64> %b) {
+ %shift = ashr <4 x i64> %a, %b
+ ret <4 x i64> %shift
+}
+
+define <4 x i64> @lshr_v4i64(<4 x i64> %a, <4 x i64> %b) {
+ %shift = lshr <4 x i64> %a, %b
+ ret <4 x i64> %shift
+}
+
+define <4 x i64> @shl_v4i64(<4 x i64> %a, <4 x i64> %b) {
+ %shift = shl <4 x i64> %a, %b
+ ret <4 x i64> %shift
+}
+
+define <4 x i64> @ashr_const_v4i64(<4 x i64> %a) {
+ %shift = ashr <4 x i64> %a, <i64 3, i64 3, i64 3, i64 3>
+ ret <4 x i64> %shift
+}
+
+define <4 x i64> @lshr_const_v4i64(<4 x i64> %a) {
+ %shift = lshr <4 x i64> %a, <i64 3, i64 3, i64 3, i64 3>
+ ret <4 x i64> %shift
+}
+
+define <4 x i64> @shl_const_v4i64(<4 x i64> %a) {
+ %shift = shl <4 x i64> %a, <i64 3, i64 3, i64 3, i64 3>
+ ret <4 x i64> %shift
+}
+
+define <16 x i16> @ashr_v16i16(<16 x i16> %a, <16 x i16> %b) {
+ %shift = ashr <16 x i16> %a, %b
+ ret <16 x i16> %shift
+}
+
+define <16 x i16> @lshr_v16i16(<16 x i16> %a, <16 x i16> %b) {
+ %shift = lshr <16 x i16> %a, %b
+ ret <16 x i16> %shift
+}
+
+define <16 x i16> @shl_v16i16(<16 x i16> %a, <16 x i16> %b) {
+ %shift = shl <16 x i16> %a, %b
+ ret <16 x i16> %shift
+}
+
+define <16 x i16> @ashr_const_v16i16(<16 x i16> %a) {
+ %shift = ashr <16 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
+ ret <16 x i16> %shift
+}
+
+define <16 x i16> @lshr_const_v16i16(<16 x i16> %a) {
+ %shift = lshr <16 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
+ ret <16 x i16> %shift
+}
+
+define <16 x i16> @shl_const_v16i16(<16 x i16> %a) {
+ %shift = shl <16 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
+ ret <16 x i16> %shift
+}
+
+define <4 x i32> @ashr_v4i32(<4 x i32> %a, <4 x i32> %b) {
+ %shift = ashr <4 x i32> %a, %b
+ ret <4 x i32> %shift
+}
+
+define <4 x i32> @shl_const_v4i32(<4 x i32> %a) {
+ %shift = shl <4 x i32> %a, <i32 3, i32 3, i32 3, i32 3>
+ ret <4 x i32> %shift
+}
+
+define <2 x i64> @ashr_v2i64(<2 x i64> %a, <2 x i64> %b) {
+ %shift = ashr <2 x i64> %a, %b
+ ret <2 x i64> %shift
+}
+
+define <2 x i64> @shl_const_v2i64(<2 x i64> %a) {
+ %shift = shl <2 x i64> %a, <i64 3, i64 3>
+ ret <2 x i64> %shift
+}
+
+define <8 x i16> @ashr_v8i16(<8 x i16> %a, <8 x i16> %b) {
+ %shift = ashr <8 x i16> %a, %b
+ ret <8 x i16> %shift
+}
+
+define <8 x i16> @lshr_v8i16(<8 x i16> %a, <8 x i16> %b) {
+ %shift = lshr <8 x i16> %a, %b
+ ret <8 x i16> %shift
+}
+
+define <8 x i16> @shl_v8i16(<8 x i16> %a, <8 x i16> %b) {
+ %shift = shl <8 x i16> %a, %b
+ ret <8 x i16> %shift
+}
+
+define <8 x i16> @ashr_const_v8i16(<8 x i16> %a) {
+ %shift = ashr <8 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
+ ret <8 x i16> %shift
+}
+
+define <8 x i16> @lshr_const_v8i16(<8 x i16> %a) {
+ %shift = lshr <8 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
+ ret <8 x i16> %shift
+}
+
+define <8 x i16> @shl_const_v8i16(<8 x i16> %a) {
+ %shift = shl <8 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
+ ret <8 x i16> %shift
+}
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX2
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
+
;
; Just one 32-bit run to make sure we do reasonable things for i64 shifts.
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=X32-SSE --check-prefix=X32-SSE2
; XOP-NEXT: vpshaq %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
+; AVX512-LABEL: var_shift_v2i64:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; AVX512-NEXT: vpsrlvq %xmm1, %xmm2, %xmm3
+; AVX512-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; AVX512-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpsubq %xmm3, %xmm0, %xmm0
+; AVX512-NEXT: retq
+;
; X32-SSE-LABEL: var_shift_v2i64:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
; XOPAVX2-NEXT: vpsravd %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: retq
;
+; AVX512-LABEL: var_shift_v4i32:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsravd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: retq
+;
; X32-SSE-LABEL: var_shift_v4i32:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movdqa %xmm1, %xmm2
; XOP-NEXT: vpshaw %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
+; AVX512-LABEL: var_shift_v8i16:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsravw %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
+;
; X32-SSE-LABEL: var_shift_v8i16:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: psllw $12, %xmm1
; XOP-NEXT: vpshab %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
+; AVX512-LABEL: var_shift_v16i8:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsllw $5, %xmm1, %xmm1
+; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
+; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX512-NEXT: vpsraw $4, %xmm3, %xmm4
+; AVX512-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
+; AVX512-NEXT: vpsraw $2, %xmm3, %xmm4
+; AVX512-NEXT: vpaddw %xmm2, %xmm2, %xmm2
+; AVX512-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
+; AVX512-NEXT: vpsraw $1, %xmm3, %xmm4
+; AVX512-NEXT: vpaddw %xmm2, %xmm2, %xmm2
+; AVX512-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2
+; AVX512-NEXT: vpsrlw $8, %xmm2, %xmm2
+; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX512-NEXT: vpsraw $4, %xmm0, %xmm3
+; AVX512-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; AVX512-NEXT: vpsraw $2, %xmm0, %xmm3
+; AVX512-NEXT: vpaddw %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; AVX512-NEXT: vpsraw $1, %xmm0, %xmm3
+; AVX512-NEXT: vpaddw %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm0
+; AVX512-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
+; AVX512-NEXT: retq
+;
; X32-SSE-LABEL: var_shift_v16i8:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
; XOPAVX2-NEXT: vpshaq %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: retq
;
+; AVX512-LABEL: splatvar_shift_v2i64:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; AVX512-NEXT: vpsrlq %xmm1, %xmm2, %xmm2
+; AVX512-NEXT: vpsrlq %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; AVX512-NEXT: vpsubq %xmm2, %xmm0, %xmm0
+; AVX512-NEXT: retq
+;
; X32-SSE-LABEL: splatvar_shift_v2i64:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movq {{.*#+}} xmm1 = xmm1[0],zero
; XOP-NEXT: vpsrad %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
+; AVX512-LABEL: splatvar_shift_v4i32:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; AVX512-NEXT: vmovss %xmm1, %xmm2, %xmm1
+; AVX512-NEXT: vpsrad %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: retq
+;
; X32-SSE-LABEL: splatvar_shift_v4i32:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: xorps %xmm2, %xmm2
; XOP-NEXT: vpsraw %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
+; AVX512-LABEL: splatvar_shift_v8i16:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3,4,5,6,7]
+; AVX512-NEXT: vpsraw %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: retq
+;
; X32-SSE-LABEL: splatvar_shift_v8i16:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movd %xmm1, %eax
; XOPAVX2-NEXT: vpshab %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: retq
;
+; AVX512-LABEL: splatvar_shift_v16i8:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpbroadcastb %xmm1, %xmm1
+; AVX512-NEXT: vpsllw $5, %xmm1, %xmm1
+; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
+; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX512-NEXT: vpsraw $4, %xmm3, %xmm4
+; AVX512-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
+; AVX512-NEXT: vpsraw $2, %xmm3, %xmm4
+; AVX512-NEXT: vpaddw %xmm2, %xmm2, %xmm2
+; AVX512-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
+; AVX512-NEXT: vpsraw $1, %xmm3, %xmm4
+; AVX512-NEXT: vpaddw %xmm2, %xmm2, %xmm2
+; AVX512-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2
+; AVX512-NEXT: vpsrlw $8, %xmm2, %xmm2
+; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX512-NEXT: vpsraw $4, %xmm0, %xmm3
+; AVX512-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; AVX512-NEXT: vpsraw $2, %xmm0, %xmm3
+; AVX512-NEXT: vpaddw %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; AVX512-NEXT: vpsraw $1, %xmm0, %xmm3
+; AVX512-NEXT: vpaddw %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm0
+; AVX512-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
+; AVX512-NEXT: retq
+;
; X32-SSE-LABEL: splatvar_shift_v16i8:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; XOP-NEXT: vpshaq %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
+; AVX512-LABEL: constant_shift_v2i64:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsrlvq {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [4611686018427387904,72057594037927936]
+; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpsubq %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: retq
+;
; X32-SSE-LABEL: constant_shift_v2i64:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movdqa {{.*#+}} xmm1 = [0,2147483648,0,2147483648]
; XOPAVX2-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
; XOPAVX2-NEXT: retq
;
+; AVX512-LABEL: constant_shift_v4i32:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: retq
+;
; X32-SSE-LABEL: constant_shift_v4i32:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; XOP-NEXT: vpshaw %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
+; AVX512-LABEL: constant_shift_v8i16:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7]
+; AVX512-NEXT: vpsravw %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
+;
; X32-SSE-LABEL: constant_shift_v8i16:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; XOP-NEXT: vpshab %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
+; AVX512-LABEL: constant_shift_v16i8:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
+; AVX512-NEXT: vpsllw $5, %xmm1, %xmm1
+; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
+; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX512-NEXT: vpsraw $4, %xmm3, %xmm4
+; AVX512-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
+; AVX512-NEXT: vpsraw $2, %xmm3, %xmm4
+; AVX512-NEXT: vpaddw %xmm2, %xmm2, %xmm2
+; AVX512-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
+; AVX512-NEXT: vpsraw $1, %xmm3, %xmm4
+; AVX512-NEXT: vpaddw %xmm2, %xmm2, %xmm2
+; AVX512-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2
+; AVX512-NEXT: vpsrlw $8, %xmm2, %xmm2
+; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX512-NEXT: vpsraw $4, %xmm0, %xmm3
+; AVX512-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; AVX512-NEXT: vpsraw $2, %xmm0, %xmm3
+; AVX512-NEXT: vpaddw %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; AVX512-NEXT: vpsraw $1, %xmm0, %xmm3
+; AVX512-NEXT: vpaddw %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm0
+; AVX512-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
+; AVX512-NEXT: retq
+;
; X32-SSE-LABEL: constant_shift_v16i8:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
; XOP-NEXT: vpshaq %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
+; AVX512-LABEL: splatconstant_shift_v2i64:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsrad $7, %xmm0, %xmm1
+; AVX512-NEXT: vpsrlq $7, %xmm0, %xmm0
+; AVX512-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
+; AVX512-NEXT: retq
+;
; X32-SSE-LABEL: splatconstant_shift_v2i64:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; XOP-NEXT: vpsrad $5, %xmm0, %xmm0
; XOP-NEXT: retq
;
+; AVX512-LABEL: splatconstant_shift_v4i32:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsrad $5, %xmm0, %xmm0
+; AVX512-NEXT: retq
+;
; X32-SSE-LABEL: splatconstant_shift_v4i32:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: psrad $5, %xmm0
; XOP-NEXT: vpsraw $3, %xmm0, %xmm0
; XOP-NEXT: retq
;
+; AVX512-LABEL: splatconstant_shift_v8i16:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsraw $3, %xmm0, %xmm0
+; AVX512-NEXT: retq
+;
; X32-SSE-LABEL: splatconstant_shift_v8i16:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: psraw $3, %xmm0
; XOP-NEXT: vpshab %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
+; AVX512-LABEL: splatconstant_shift_v16i8:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsrlw $3, %xmm0, %xmm0
+; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpsubb %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: retq
+;
; X32-SSE-LABEL: splatconstant_shift_v16i8:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: psrlw $3, %xmm0
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX2
-
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
;
; Variable Shifts
;
; XOPAVX2-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0
; XOPAVX2-NEXT: vpsubq %ymm3, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
+;
+; AVX512-LABEL: var_shift_v4i64:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpbroadcastq {{.*}}(%rip), %ymm2
+; AVX512-NEXT: vpsrlvq %ymm1, %ymm2, %ymm3
+; AVX512-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; AVX512-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpsubq %ymm3, %ymm0, %ymm0
+; AVX512-NEXT: retq
%shift = ashr <4 x i64> %a, %b
ret <4 x i64> %shift
}
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsravd %ymm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
+;
+; AVX512-LABEL: var_shift_v8i32:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsravd %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: retq
%shift = ashr <8 x i32> %a, %b
ret <8 x i32> %shift
}
; XOPAVX2-NEXT: vpshaw %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
+;
+; AVX512-LABEL: var_shift_v16i16:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsravw %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
%shift = ashr <16 x i16> %a, %b
ret <16 x i16> %shift
}
; XOPAVX2-NEXT: vpshab %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
+;
+; AVX512-LABEL: var_shift_v32i8:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsllw $5, %ymm1, %ymm1
+; AVX512-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
+; AVX512-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
+; AVX512-NEXT: vpsraw $4, %ymm3, %ymm4
+; AVX512-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
+; AVX512-NEXT: vpsraw $2, %ymm3, %ymm4
+; AVX512-NEXT: vpaddw %ymm2, %ymm2, %ymm2
+; AVX512-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
+; AVX512-NEXT: vpsraw $1, %ymm3, %ymm4
+; AVX512-NEXT: vpaddw %ymm2, %ymm2, %ymm2
+; AVX512-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2
+; AVX512-NEXT: vpsrlw $8, %ymm2, %ymm2
+; AVX512-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
+; AVX512-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
+; AVX512-NEXT: vpsraw $4, %ymm0, %ymm3
+; AVX512-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; AVX512-NEXT: vpsraw $2, %ymm0, %ymm3
+; AVX512-NEXT: vpaddw %ymm1, %ymm1, %ymm1
+; AVX512-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; AVX512-NEXT: vpsraw $1, %ymm0, %ymm3
+; AVX512-NEXT: vpaddw %ymm1, %ymm1, %ymm1
+; AVX512-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; AVX512-NEXT: vpsrlw $8, %ymm0, %ymm0
+; AVX512-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
+; AVX512-NEXT: retq
%shift = ashr <32 x i8> %a, %b
ret <32 x i8> %shift
}
; XOPAVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
; XOPAVX2-NEXT: vpsubq %ymm2, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
+;
+; AVX512-LABEL: splatvar_shift_v4i64:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpbroadcastq {{.*}}(%rip), %ymm2
+; AVX512-NEXT: vpsrlq %xmm1, %ymm2, %ymm2
+; AVX512-NEXT: vpsrlq %xmm1, %ymm0, %ymm0
+; AVX512-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; AVX512-NEXT: vpsubq %ymm2, %ymm0, %ymm0
+; AVX512-NEXT: retq
%splat = shufflevector <4 x i64> %b, <4 x i64> undef, <4 x i32> zeroinitializer
%shift = ashr <4 x i64> %a, %splat
ret <4 x i64> %shift
; XOPAVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3,4,5,6,7]
; XOPAVX2-NEXT: vpsrad %xmm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
+;
+; AVX512-LABEL: splatvar_shift_v8i32:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; AVX512-NEXT: vmovss %xmm1, %xmm2, %xmm1
+; AVX512-NEXT: vpsrad %xmm1, %ymm0, %ymm0
+; AVX512-NEXT: retq
%splat = shufflevector <8 x i32> %b, <8 x i32> undef, <8 x i32> zeroinitializer
%shift = ashr <8 x i32> %a, %splat
ret <8 x i32> %shift
; XOPAVX2-NEXT: vmovd %eax, %xmm1
; XOPAVX2-NEXT: vpsraw %xmm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
+;
+; AVX512-LABEL: splatvar_shift_v16i16:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vmovd %xmm1, %eax
+; AVX512-NEXT: movzwl %ax, %eax
+; AVX512-NEXT: vmovd %eax, %xmm1
+; AVX512-NEXT: vpsraw %xmm1, %ymm0, %ymm0
+; AVX512-NEXT: retq
%splat = shufflevector <16 x i16> %b, <16 x i16> undef, <16 x i32> zeroinitializer
%shift = ashr <16 x i16> %a, %splat
ret <16 x i16> %shift
; XOPAVX2-NEXT: vpshab %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
+;
+; AVX512-LABEL: splatvar_shift_v32i8:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpbroadcastb %xmm1, %ymm1
+; AVX512-NEXT: vpsllw $5, %ymm1, %ymm1
+; AVX512-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
+; AVX512-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
+; AVX512-NEXT: vpsraw $4, %ymm3, %ymm4
+; AVX512-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
+; AVX512-NEXT: vpsraw $2, %ymm3, %ymm4
+; AVX512-NEXT: vpaddw %ymm2, %ymm2, %ymm2
+; AVX512-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
+; AVX512-NEXT: vpsraw $1, %ymm3, %ymm4
+; AVX512-NEXT: vpaddw %ymm2, %ymm2, %ymm2
+; AVX512-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2
+; AVX512-NEXT: vpsrlw $8, %ymm2, %ymm2
+; AVX512-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
+; AVX512-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
+; AVX512-NEXT: vpsraw $4, %ymm0, %ymm3
+; AVX512-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; AVX512-NEXT: vpsraw $2, %ymm0, %ymm3
+; AVX512-NEXT: vpaddw %ymm1, %ymm1, %ymm1
+; AVX512-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; AVX512-NEXT: vpsraw $1, %ymm0, %ymm3
+; AVX512-NEXT: vpaddw %ymm1, %ymm1, %ymm1
+; AVX512-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; AVX512-NEXT: vpsrlw $8, %ymm0, %ymm0
+; AVX512-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
+; AVX512-NEXT: retq
%splat = shufflevector <32 x i8> %b, <32 x i8> undef, <32 x i32> zeroinitializer
%shift = ashr <32 x i8> %a, %splat
ret <32 x i8> %shift
; XOPAVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
; XOPAVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
+;
+; AVX512-LABEL: constant_shift_v4i64:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsrlvq {{.*}}(%rip), %ymm0, %ymm0
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [4611686018427387904,72057594037927936,4294967296,2]
+; AVX512-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpsubq %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: retq
%shift = ashr <4 x i64> %a, <i64 1, i64 7, i64 31, i64 62>
ret <4 x i64> %shift
}
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsravd {{.*}}(%rip), %ymm0, %ymm0
; XOPAVX2-NEXT: retq
+;
+; AVX512-LABEL: constant_shift_v8i32:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsravd {{.*}}(%rip), %ymm0, %ymm0
+; AVX512-NEXT: retq
%shift = ashr <8 x i32> %a, <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 8, i32 7>
ret <8 x i32> %shift
}
; XOPAVX2-NEXT: vpshaw %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
+;
+; AVX512-LABEL: constant_shift_v16i16:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512-NEXT: vpsravw %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
%shift = ashr <16 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
ret <16 x i16> %shift
}
; XOPAVX2-NEXT: vpshab %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
+;
+; AVX512-LABEL: constant_shift_v32i8:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0,0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
+; AVX512-NEXT: vpsllw $5, %ymm1, %ymm1
+; AVX512-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
+; AVX512-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
+; AVX512-NEXT: vpsraw $4, %ymm3, %ymm4
+; AVX512-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
+; AVX512-NEXT: vpsraw $2, %ymm3, %ymm4
+; AVX512-NEXT: vpaddw %ymm2, %ymm2, %ymm2
+; AVX512-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
+; AVX512-NEXT: vpsraw $1, %ymm3, %ymm4
+; AVX512-NEXT: vpaddw %ymm2, %ymm2, %ymm2
+; AVX512-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2
+; AVX512-NEXT: vpsrlw $8, %ymm2, %ymm2
+; AVX512-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
+; AVX512-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
+; AVX512-NEXT: vpsraw $4, %ymm0, %ymm3
+; AVX512-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; AVX512-NEXT: vpsraw $2, %ymm0, %ymm3
+; AVX512-NEXT: vpaddw %ymm1, %ymm1, %ymm1
+; AVX512-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; AVX512-NEXT: vpsraw $1, %ymm0, %ymm3
+; AVX512-NEXT: vpaddw %ymm1, %ymm1, %ymm1
+; AVX512-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; AVX512-NEXT: vpsrlw $8, %ymm0, %ymm0
+; AVX512-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
+; AVX512-NEXT: retq
%shift = ashr <32 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
ret <32 x i8> %shift
}
; XOPAVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
; XOPAVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
+;
+; AVX512-LABEL: splatconstant_shift_v4i64:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsrad $7, %ymm0, %ymm1
+; AVX512-NEXT: vpsrlq $7, %ymm0, %ymm0
+; AVX512-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
+; AVX512-NEXT: retq
%shift = ashr <4 x i64> %a, <i64 7, i64 7, i64 7, i64 7>
ret <4 x i64> %shift
}
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsrad $5, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
+;
+; AVX512-LABEL: splatconstant_shift_v8i32:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsrad $5, %ymm0, %ymm0
+; AVX512-NEXT: retq
%shift = ashr <8 x i32> %a, <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
ret <8 x i32> %shift
}
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsraw $3, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
+;
+; AVX512-LABEL: splatconstant_shift_v16i16:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsraw $3, %ymm0, %ymm0
+; AVX512-NEXT: retq
%shift = ashr <16 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
ret <16 x i16> %shift
}
; XOPAVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
; XOPAVX2-NEXT: vpsubb %ymm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
+;
+; AVX512-LABEL: splatconstant_shift_v32i8:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsrlw $3, %ymm0, %ymm0
+; AVX512-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpsubb %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: retq
%shift = ashr <32 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
ret <32 x i8> %shift
}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; TODO: Add AVX512BW shift support
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512dq | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512DQ
-
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
;
; Variable Shifts
;
}
define <32 x i16> @var_shift_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
-; ALL-LABEL: var_shift_v32i16:
-; ALL: ## BB#0:
-; ALL-NEXT: vpxor %ymm4, %ymm4, %ymm4
-; ALL-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[12],ymm4[12],ymm2[13],ymm4[13],ymm2[14],ymm4[14],ymm2[15],ymm4[15]
-; ALL-NEXT: vpunpckhwd {{.*#+}} ymm6 = ymm0[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
-; ALL-NEXT: vpsravd %ymm5, %ymm6, %ymm5
-; ALL-NEXT: vpsrld $16, %ymm5, %ymm5
-; ALL-NEXT: vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[8],ymm4[8],ymm2[9],ymm4[9],ymm2[10],ymm4[10],ymm2[11],ymm4[11]
-; ALL-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; ALL-NEXT: vpsravd %ymm2, %ymm0, %ymm0
-; ALL-NEXT: vpsrld $16, %ymm0, %ymm0
-; ALL-NEXT: vpackusdw %ymm5, %ymm0, %ymm0
-; ALL-NEXT: vpunpckhwd {{.*#+}} ymm2 = ymm3[4],ymm4[4],ymm3[5],ymm4[5],ymm3[6],ymm4[6],ymm3[7],ymm4[7],ymm3[12],ymm4[12],ymm3[13],ymm4[13],ymm3[14],ymm4[14],ymm3[15],ymm4[15]
-; ALL-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm1[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
-; ALL-NEXT: vpsravd %ymm2, %ymm5, %ymm2
-; ALL-NEXT: vpsrld $16, %ymm2, %ymm2
-; ALL-NEXT: vpunpcklwd {{.*#+}} ymm3 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[8],ymm4[8],ymm3[9],ymm4[9],ymm3[10],ymm4[10],ymm3[11],ymm4[11]
-; ALL-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; ALL-NEXT: vpsravd %ymm3, %ymm1, %ymm1
-; ALL-NEXT: vpsrld $16, %ymm1, %ymm1
-; ALL-NEXT: vpackusdw %ymm2, %ymm1, %ymm1
-; ALL-NEXT: retq
+; AVX512DQ-LABEL: var_shift_v32i16:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vpxor %ymm4, %ymm4, %ymm4
+; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[12],ymm4[12],ymm2[13],ymm4[13],ymm2[14],ymm4[14],ymm2[15],ymm4[15]
+; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm6 = ymm0[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
+; AVX512DQ-NEXT: vpsravd %ymm5, %ymm6, %ymm5
+; AVX512DQ-NEXT: vpsrld $16, %ymm5, %ymm5
+; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[8],ymm4[8],ymm2[9],ymm4[9],ymm2[10],ymm4[10],ymm2[11],ymm4[11]
+; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512DQ-NEXT: vpsravd %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsrld $16, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpackusdw %ymm5, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm2 = ymm3[4],ymm4[4],ymm3[5],ymm4[5],ymm3[6],ymm4[6],ymm3[7],ymm4[7],ymm3[12],ymm4[12],ymm3[13],ymm4[13],ymm3[14],ymm4[14],ymm3[15],ymm4[15]
+; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm1[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
+; AVX512DQ-NEXT: vpsravd %ymm2, %ymm5, %ymm2
+; AVX512DQ-NEXT: vpsrld $16, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm3 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[8],ymm4[8],ymm3[9],ymm4[9],ymm3[10],ymm4[10],ymm3[11],ymm4[11]
+; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512DQ-NEXT: vpsravd %ymm3, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpsrld $16, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpackusdw %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: var_shift_v32i16:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: retq
%shift = ashr <32 x i16> %a, %b
ret <32 x i16> %shift
}
define <64 x i8> @var_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
-; ALL-LABEL: var_shift_v64i8:
-; ALL: ## BB#0:
-; ALL-NEXT: vpsllw $5, %ymm2, %ymm2
-; ALL-NEXT: vpunpckhbw {{.*#+}} ymm4 = ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15],ymm0[24],ymm2[24],ymm0[25],ymm2[25],ymm0[26],ymm2[26],ymm0[27],ymm2[27],ymm0[28],ymm2[28],ymm0[29],ymm2[29],ymm0[30],ymm2[30],ymm0[31],ymm2[31]
-; ALL-NEXT: vpunpckhbw {{.*#+}} ymm5 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; ALL-NEXT: vpsraw $4, %ymm5, %ymm6
-; ALL-NEXT: vpblendvb %ymm4, %ymm6, %ymm5, %ymm5
-; ALL-NEXT: vpsraw $2, %ymm5, %ymm6
-; ALL-NEXT: vpaddw %ymm4, %ymm4, %ymm4
-; ALL-NEXT: vpblendvb %ymm4, %ymm6, %ymm5, %ymm5
-; ALL-NEXT: vpsraw $1, %ymm5, %ymm6
-; ALL-NEXT: vpaddw %ymm4, %ymm4, %ymm4
-; ALL-NEXT: vpblendvb %ymm4, %ymm6, %ymm5, %ymm4
-; ALL-NEXT: vpsrlw $8, %ymm4, %ymm4
-; ALL-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[4],ymm2[4],ymm0[5],ymm2[5],ymm0[6],ymm2[6],ymm0[7],ymm2[7],ymm0[16],ymm2[16],ymm0[17],ymm2[17],ymm0[18],ymm2[18],ymm0[19],ymm2[19],ymm0[20],ymm2[20],ymm0[21],ymm2[21],ymm0[22],ymm2[22],ymm0[23],ymm2[23]
-; ALL-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; ALL-NEXT: vpsraw $4, %ymm0, %ymm5
-; ALL-NEXT: vpblendvb %ymm2, %ymm5, %ymm0, %ymm0
-; ALL-NEXT: vpsraw $2, %ymm0, %ymm5
-; ALL-NEXT: vpaddw %ymm2, %ymm2, %ymm2
-; ALL-NEXT: vpblendvb %ymm2, %ymm5, %ymm0, %ymm0
-; ALL-NEXT: vpsraw $1, %ymm0, %ymm5
-; ALL-NEXT: vpaddw %ymm2, %ymm2, %ymm2
-; ALL-NEXT: vpblendvb %ymm2, %ymm5, %ymm0, %ymm0
-; ALL-NEXT: vpsrlw $8, %ymm0, %ymm0
-; ALL-NEXT: vpackuswb %ymm4, %ymm0, %ymm0
-; ALL-NEXT: vpsllw $5, %ymm3, %ymm2
-; ALL-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15],ymm0[24],ymm2[24],ymm0[25],ymm2[25],ymm0[26],ymm2[26],ymm0[27],ymm2[27],ymm0[28],ymm2[28],ymm0[29],ymm2[29],ymm0[30],ymm2[30],ymm0[31],ymm2[31]
-; ALL-NEXT: vpunpckhbw {{.*#+}} ymm4 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
-; ALL-NEXT: vpsraw $4, %ymm4, %ymm5
-; ALL-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm4
-; ALL-NEXT: vpsraw $2, %ymm4, %ymm5
-; ALL-NEXT: vpaddw %ymm3, %ymm3, %ymm3
-; ALL-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm4
-; ALL-NEXT: vpsraw $1, %ymm4, %ymm5
-; ALL-NEXT: vpaddw %ymm3, %ymm3, %ymm3
-; ALL-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm3
-; ALL-NEXT: vpsrlw $8, %ymm3, %ymm3
-; ALL-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[4],ymm2[4],ymm0[5],ymm2[5],ymm0[6],ymm2[6],ymm0[7],ymm2[7],ymm0[16],ymm2[16],ymm0[17],ymm2[17],ymm0[18],ymm2[18],ymm0[19],ymm2[19],ymm0[20],ymm2[20],ymm0[21],ymm2[21],ymm0[22],ymm2[22],ymm0[23],ymm2[23]
-; ALL-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
-; ALL-NEXT: vpsraw $4, %ymm1, %ymm4
-; ALL-NEXT: vpblendvb %ymm2, %ymm4, %ymm1, %ymm1
-; ALL-NEXT: vpsraw $2, %ymm1, %ymm4
-; ALL-NEXT: vpaddw %ymm2, %ymm2, %ymm2
-; ALL-NEXT: vpblendvb %ymm2, %ymm4, %ymm1, %ymm1
-; ALL-NEXT: vpsraw $1, %ymm1, %ymm4
-; ALL-NEXT: vpaddw %ymm2, %ymm2, %ymm2
-; ALL-NEXT: vpblendvb %ymm2, %ymm4, %ymm1, %ymm1
-; ALL-NEXT: vpsrlw $8, %ymm1, %ymm1
-; ALL-NEXT: vpackuswb %ymm3, %ymm1, %ymm1
-; ALL-NEXT: retq
+; AVX512DQ-LABEL: var_shift_v64i8:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vpsllw $5, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm4 = ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15],ymm0[24],ymm2[24],ymm0[25],ymm2[25],ymm0[26],ymm2[26],ymm0[27],ymm2[27],ymm0[28],ymm2[28],ymm0[29],ymm2[29],ymm0[30],ymm2[30],ymm0[31],ymm2[31]
+; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm5 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
+; AVX512DQ-NEXT: vpsraw $4, %ymm5, %ymm6
+; AVX512DQ-NEXT: vpblendvb %ymm4, %ymm6, %ymm5, %ymm5
+; AVX512DQ-NEXT: vpsraw $2, %ymm5, %ymm6
+; AVX512DQ-NEXT: vpaddw %ymm4, %ymm4, %ymm4
+; AVX512DQ-NEXT: vpblendvb %ymm4, %ymm6, %ymm5, %ymm5
+; AVX512DQ-NEXT: vpsraw $1, %ymm5, %ymm6
+; AVX512DQ-NEXT: vpaddw %ymm4, %ymm4, %ymm4
+; AVX512DQ-NEXT: vpblendvb %ymm4, %ymm6, %ymm5, %ymm4
+; AVX512DQ-NEXT: vpsrlw $8, %ymm4, %ymm4
+; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[4],ymm2[4],ymm0[5],ymm2[5],ymm0[6],ymm2[6],ymm0[7],ymm2[7],ymm0[16],ymm2[16],ymm0[17],ymm2[17],ymm0[18],ymm2[18],ymm0[19],ymm2[19],ymm0[20],ymm2[20],ymm0[21],ymm2[21],ymm0[22],ymm2[22],ymm0[23],ymm2[23]
+; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
+; AVX512DQ-NEXT: vpsraw $4, %ymm0, %ymm5
+; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm5, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsraw $2, %ymm0, %ymm5
+; AVX512DQ-NEXT: vpaddw %ymm2, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm5, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsraw $1, %ymm0, %ymm5
+; AVX512DQ-NEXT: vpaddw %ymm2, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm5, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsrlw $8, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpackuswb %ymm4, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsllw $5, %ymm3, %ymm2
+; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15],ymm0[24],ymm2[24],ymm0[25],ymm2[25],ymm0[26],ymm2[26],ymm0[27],ymm2[27],ymm0[28],ymm2[28],ymm0[29],ymm2[29],ymm0[30],ymm2[30],ymm0[31],ymm2[31]
+; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm4 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
+; AVX512DQ-NEXT: vpsraw $4, %ymm4, %ymm5
+; AVX512DQ-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm4
+; AVX512DQ-NEXT: vpsraw $2, %ymm4, %ymm5
+; AVX512DQ-NEXT: vpaddw %ymm3, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm4
+; AVX512DQ-NEXT: vpsraw $1, %ymm4, %ymm5
+; AVX512DQ-NEXT: vpaddw %ymm3, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm3
+; AVX512DQ-NEXT: vpsrlw $8, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[4],ymm2[4],ymm0[5],ymm2[5],ymm0[6],ymm2[6],ymm0[7],ymm2[7],ymm0[16],ymm2[16],ymm0[17],ymm2[17],ymm0[18],ymm2[18],ymm0[19],ymm2[19],ymm0[20],ymm2[20],ymm0[21],ymm2[21],ymm0[22],ymm2[22],ymm0[23],ymm2[23]
+; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
+; AVX512DQ-NEXT: vpsraw $4, %ymm1, %ymm4
+; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm4, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpsraw $2, %ymm1, %ymm4
+; AVX512DQ-NEXT: vpaddw %ymm2, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm4, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpsraw $1, %ymm1, %ymm4
+; AVX512DQ-NEXT: vpaddw %ymm2, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm4, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpsrlw $8, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpackuswb %ymm3, %ymm1, %ymm1
+; AVX512DQ-NEXT: retq
%shift = ashr <64 x i8> %a, %b
ret <64 x i8> %shift
}
}
define <32 x i16> @splatvar_shift_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
-; ALL-LABEL: splatvar_shift_v32i16:
-; ALL: ## BB#0:
-; ALL-NEXT: vmovd %xmm2, %eax
-; ALL-NEXT: movzwl %ax, %eax
-; ALL-NEXT: vmovd %eax, %xmm2
-; ALL-NEXT: vpsraw %xmm2, %ymm0, %ymm0
-; ALL-NEXT: vpsraw %xmm2, %ymm1, %ymm1
-; ALL-NEXT: retq
+; AVX512DQ-LABEL: splatvar_shift_v32i16:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vmovd %xmm2, %eax
+; AVX512DQ-NEXT: movzwl %ax, %eax
+; AVX512DQ-NEXT: vmovd %eax, %xmm2
+; AVX512DQ-NEXT: vpsraw %xmm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsraw %xmm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: splatvar_shift_v32i16:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vmovd %xmm1, %eax
+; AVX512BW-NEXT: movzwl %ax, %eax
+; AVX512BW-NEXT: vmovd %eax, %xmm1
+; AVX512BW-NEXT: vpsraw %xmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: retq
%splat = shufflevector <32 x i16> %b, <32 x i16> undef, <32 x i32> zeroinitializer
%shift = ashr <32 x i16> %a, %splat
ret <32 x i16> %shift
}
define <64 x i8> @splatvar_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
-; ALL-LABEL: splatvar_shift_v64i8:
-; ALL: ## BB#0:
-; ALL-NEXT: vpbroadcastb %xmm2, %ymm2
-; ALL-NEXT: vpsllw $5, %ymm2, %ymm2
-; ALL-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15],ymm0[24],ymm2[24],ymm0[25],ymm2[25],ymm0[26],ymm2[26],ymm0[27],ymm2[27],ymm0[28],ymm2[28],ymm0[29],ymm2[29],ymm0[30],ymm2[30],ymm0[31],ymm2[31]
-; ALL-NEXT: vpunpckhbw {{.*#+}} ymm4 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; ALL-NEXT: vpsraw $4, %ymm4, %ymm5
-; ALL-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm4
-; ALL-NEXT: vpsraw $2, %ymm4, %ymm5
-; ALL-NEXT: vpaddw %ymm3, %ymm3, %ymm6
-; ALL-NEXT: vpblendvb %ymm6, %ymm5, %ymm4, %ymm4
-; ALL-NEXT: vpsraw $1, %ymm4, %ymm5
-; ALL-NEXT: vpaddw %ymm6, %ymm6, %ymm7
-; ALL-NEXT: vpblendvb %ymm7, %ymm5, %ymm4, %ymm4
-; ALL-NEXT: vpsrlw $8, %ymm4, %ymm4
-; ALL-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[4],ymm2[4],ymm0[5],ymm2[5],ymm0[6],ymm2[6],ymm0[7],ymm2[7],ymm0[16],ymm2[16],ymm0[17],ymm2[17],ymm0[18],ymm2[18],ymm0[19],ymm2[19],ymm0[20],ymm2[20],ymm0[21],ymm2[21],ymm0[22],ymm2[22],ymm0[23],ymm2[23]
-; ALL-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; ALL-NEXT: vpsraw $4, %ymm0, %ymm5
-; ALL-NEXT: vpblendvb %ymm2, %ymm5, %ymm0, %ymm0
-; ALL-NEXT: vpsraw $2, %ymm0, %ymm5
-; ALL-NEXT: vpaddw %ymm2, %ymm2, %ymm8
-; ALL-NEXT: vpblendvb %ymm8, %ymm5, %ymm0, %ymm0
-; ALL-NEXT: vpsraw $1, %ymm0, %ymm5
-; ALL-NEXT: vpaddw %ymm8, %ymm8, %ymm9
-; ALL-NEXT: vpblendvb %ymm9, %ymm5, %ymm0, %ymm0
-; ALL-NEXT: vpsrlw $8, %ymm0, %ymm0
-; ALL-NEXT: vpackuswb %ymm4, %ymm0, %ymm0
-; ALL-NEXT: vpunpckhbw {{.*#+}} ymm4 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
-; ALL-NEXT: vpsraw $4, %ymm4, %ymm5
-; ALL-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm3
-; ALL-NEXT: vpsraw $2, %ymm3, %ymm4
-; ALL-NEXT: vpblendvb %ymm6, %ymm4, %ymm3, %ymm3
-; ALL-NEXT: vpsraw $1, %ymm3, %ymm4
-; ALL-NEXT: vpblendvb %ymm7, %ymm4, %ymm3, %ymm3
-; ALL-NEXT: vpsrlw $8, %ymm3, %ymm3
-; ALL-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
-; ALL-NEXT: vpsraw $4, %ymm1, %ymm4
-; ALL-NEXT: vpblendvb %ymm2, %ymm4, %ymm1, %ymm1
-; ALL-NEXT: vpsraw $2, %ymm1, %ymm2
-; ALL-NEXT: vpblendvb %ymm8, %ymm2, %ymm1, %ymm1
-; ALL-NEXT: vpsraw $1, %ymm1, %ymm2
-; ALL-NEXT: vpblendvb %ymm9, %ymm2, %ymm1, %ymm1
-; ALL-NEXT: vpsrlw $8, %ymm1, %ymm1
-; ALL-NEXT: vpackuswb %ymm3, %ymm1, %ymm1
-; ALL-NEXT: retq
+; AVX512DQ-LABEL: splatvar_shift_v64i8:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vpbroadcastb %xmm2, %ymm2
+; AVX512DQ-NEXT: vpsllw $5, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15],ymm0[24],ymm2[24],ymm0[25],ymm2[25],ymm0[26],ymm2[26],ymm0[27],ymm2[27],ymm0[28],ymm2[28],ymm0[29],ymm2[29],ymm0[30],ymm2[30],ymm0[31],ymm2[31]
+; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm4 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
+; AVX512DQ-NEXT: vpsraw $4, %ymm4, %ymm5
+; AVX512DQ-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm4
+; AVX512DQ-NEXT: vpsraw $2, %ymm4, %ymm5
+; AVX512DQ-NEXT: vpaddw %ymm3, %ymm3, %ymm6
+; AVX512DQ-NEXT: vpblendvb %ymm6, %ymm5, %ymm4, %ymm4
+; AVX512DQ-NEXT: vpsraw $1, %ymm4, %ymm5
+; AVX512DQ-NEXT: vpaddw %ymm6, %ymm6, %ymm7
+; AVX512DQ-NEXT: vpblendvb %ymm7, %ymm5, %ymm4, %ymm4
+; AVX512DQ-NEXT: vpsrlw $8, %ymm4, %ymm4
+; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[4],ymm2[4],ymm0[5],ymm2[5],ymm0[6],ymm2[6],ymm0[7],ymm2[7],ymm0[16],ymm2[16],ymm0[17],ymm2[17],ymm0[18],ymm2[18],ymm0[19],ymm2[19],ymm0[20],ymm2[20],ymm0[21],ymm2[21],ymm0[22],ymm2[22],ymm0[23],ymm2[23]
+; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
+; AVX512DQ-NEXT: vpsraw $4, %ymm0, %ymm5
+; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm5, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsraw $2, %ymm0, %ymm5
+; AVX512DQ-NEXT: vpaddw %ymm2, %ymm2, %ymm8
+; AVX512DQ-NEXT: vpblendvb %ymm8, %ymm5, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsraw $1, %ymm0, %ymm5
+; AVX512DQ-NEXT: vpaddw %ymm8, %ymm8, %ymm9
+; AVX512DQ-NEXT: vpblendvb %ymm9, %ymm5, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsrlw $8, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpackuswb %ymm4, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm4 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
+; AVX512DQ-NEXT: vpsraw $4, %ymm4, %ymm5
+; AVX512DQ-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm3
+; AVX512DQ-NEXT: vpsraw $2, %ymm3, %ymm4
+; AVX512DQ-NEXT: vpblendvb %ymm6, %ymm4, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpsraw $1, %ymm3, %ymm4
+; AVX512DQ-NEXT: vpblendvb %ymm7, %ymm4, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpsrlw $8, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
+; AVX512DQ-NEXT: vpsraw $4, %ymm1, %ymm4
+; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm4, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpsraw $2, %ymm1, %ymm2
+; AVX512DQ-NEXT: vpblendvb %ymm8, %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpsraw $1, %ymm1, %ymm2
+; AVX512DQ-NEXT: vpblendvb %ymm9, %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpsrlw $8, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpackuswb %ymm3, %ymm1, %ymm1
+; AVX512DQ-NEXT: retq
%splat = shufflevector <64 x i8> %b, <64 x i8> undef, <64 x i32> zeroinitializer
%shift = ashr <64 x i8> %a, %splat
ret <64 x i8> %shift
}
define <32 x i16> @constant_shift_v32i16(<32 x i16> %a) nounwind {
-; ALL-LABEL: constant_shift_v32i16:
-; ALL: ## BB#0:
-; ALL-NEXT: vpxor %ymm2, %ymm2, %ymm2
-; ALL-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
-; ALL-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm3[4],ymm2[4],ymm3[5],ymm2[5],ymm3[6],ymm2[6],ymm3[7],ymm2[7],ymm3[12],ymm2[12],ymm3[13],ymm2[13],ymm3[14],ymm2[14],ymm3[15],ymm2[15]
-; ALL-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm0[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
-; ALL-NEXT: vpsravd %ymm4, %ymm5, %ymm5
-; ALL-NEXT: vpsrld $16, %ymm5, %ymm5
-; ALL-NEXT: vpunpcklwd {{.*#+}} ymm2 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[8],ymm2[8],ymm3[9],ymm2[9],ymm3[10],ymm2[10],ymm3[11],ymm2[11]
-; ALL-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; ALL-NEXT: vpsravd %ymm2, %ymm0, %ymm0
-; ALL-NEXT: vpsrld $16, %ymm0, %ymm0
-; ALL-NEXT: vpackusdw %ymm5, %ymm0, %ymm0
-; ALL-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
-; ALL-NEXT: vpsravd %ymm4, %ymm3, %ymm3
-; ALL-NEXT: vpsrld $16, %ymm3, %ymm3
-; ALL-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; ALL-NEXT: vpsravd %ymm2, %ymm1, %ymm1
-; ALL-NEXT: vpsrld $16, %ymm1, %ymm1
-; ALL-NEXT: vpackusdw %ymm3, %ymm1, %ymm1
-; ALL-NEXT: retq
+; AVX512DQ-LABEL: constant_shift_v32i16:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vpxor %ymm2, %ymm2, %ymm2
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm3[4],ymm2[4],ymm3[5],ymm2[5],ymm3[6],ymm2[6],ymm3[7],ymm2[7],ymm3[12],ymm2[12],ymm3[13],ymm2[13],ymm3[14],ymm2[14],ymm3[15],ymm2[15]
+; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm0[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
+; AVX512DQ-NEXT: vpsravd %ymm4, %ymm5, %ymm5
+; AVX512DQ-NEXT: vpsrld $16, %ymm5, %ymm5
+; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm2 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[8],ymm2[8],ymm3[9],ymm2[9],ymm3[10],ymm2[10],ymm3[11],ymm2[11]
+; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512DQ-NEXT: vpsravd %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsrld $16, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpackusdw %ymm5, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
+; AVX512DQ-NEXT: vpsravd %ymm4, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpsrld $16, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512DQ-NEXT: vpsravd %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpsrld $16, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpackusdw %ymm3, %ymm1, %ymm1
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: constant_shift_v32i16:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpsravw {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT: retq
%shift = ashr <32 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
ret <32 x i16> %shift
}
define <64 x i8> @constant_shift_v64i8(<64 x i8> %a) nounwind {
-; ALL-LABEL: constant_shift_v64i8:
-; ALL: ## BB#0:
-; ALL-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0,0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
-; ALL-NEXT: vpsllw $5, %ymm2, %ymm2
-; ALL-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15],ymm0[24],ymm2[24],ymm0[25],ymm2[25],ymm0[26],ymm2[26],ymm0[27],ymm2[27],ymm0[28],ymm2[28],ymm0[29],ymm2[29],ymm0[30],ymm2[30],ymm0[31],ymm2[31]
-; ALL-NEXT: vpunpckhbw {{.*#+}} ymm4 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; ALL-NEXT: vpsraw $4, %ymm4, %ymm5
-; ALL-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm4
-; ALL-NEXT: vpsraw $2, %ymm4, %ymm5
-; ALL-NEXT: vpaddw %ymm3, %ymm3, %ymm6
-; ALL-NEXT: vpblendvb %ymm6, %ymm5, %ymm4, %ymm4
-; ALL-NEXT: vpsraw $1, %ymm4, %ymm5
-; ALL-NEXT: vpaddw %ymm6, %ymm6, %ymm7
-; ALL-NEXT: vpblendvb %ymm7, %ymm5, %ymm4, %ymm4
-; ALL-NEXT: vpsrlw $8, %ymm4, %ymm4
-; ALL-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[4],ymm2[4],ymm0[5],ymm2[5],ymm0[6],ymm2[6],ymm0[7],ymm2[7],ymm0[16],ymm2[16],ymm0[17],ymm2[17],ymm0[18],ymm2[18],ymm0[19],ymm2[19],ymm0[20],ymm2[20],ymm0[21],ymm2[21],ymm0[22],ymm2[22],ymm0[23],ymm2[23]
-; ALL-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; ALL-NEXT: vpsraw $4, %ymm0, %ymm5
-; ALL-NEXT: vpblendvb %ymm2, %ymm5, %ymm0, %ymm0
-; ALL-NEXT: vpsraw $2, %ymm0, %ymm5
-; ALL-NEXT: vpaddw %ymm2, %ymm2, %ymm8
-; ALL-NEXT: vpblendvb %ymm8, %ymm5, %ymm0, %ymm0
-; ALL-NEXT: vpsraw $1, %ymm0, %ymm5
-; ALL-NEXT: vpaddw %ymm8, %ymm8, %ymm9
-; ALL-NEXT: vpblendvb %ymm9, %ymm5, %ymm0, %ymm0
-; ALL-NEXT: vpsrlw $8, %ymm0, %ymm0
-; ALL-NEXT: vpackuswb %ymm4, %ymm0, %ymm0
-; ALL-NEXT: vpunpckhbw {{.*#+}} ymm4 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
-; ALL-NEXT: vpsraw $4, %ymm4, %ymm5
-; ALL-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm3
-; ALL-NEXT: vpsraw $2, %ymm3, %ymm4
-; ALL-NEXT: vpblendvb %ymm6, %ymm4, %ymm3, %ymm3
-; ALL-NEXT: vpsraw $1, %ymm3, %ymm4
-; ALL-NEXT: vpblendvb %ymm7, %ymm4, %ymm3, %ymm3
-; ALL-NEXT: vpsrlw $8, %ymm3, %ymm3
-; ALL-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
-; ALL-NEXT: vpsraw $4, %ymm1, %ymm4
-; ALL-NEXT: vpblendvb %ymm2, %ymm4, %ymm1, %ymm1
-; ALL-NEXT: vpsraw $2, %ymm1, %ymm2
-; ALL-NEXT: vpblendvb %ymm8, %ymm2, %ymm1, %ymm1
-; ALL-NEXT: vpsraw $1, %ymm1, %ymm2
-; ALL-NEXT: vpblendvb %ymm9, %ymm2, %ymm1, %ymm1
-; ALL-NEXT: vpsrlw $8, %ymm1, %ymm1
-; ALL-NEXT: vpackuswb %ymm3, %ymm1, %ymm1
-; ALL-NEXT: retq
+; AVX512DQ-LABEL: constant_shift_v64i8:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0,0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
+; AVX512DQ-NEXT: vpsllw $5, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15],ymm0[24],ymm2[24],ymm0[25],ymm2[25],ymm0[26],ymm2[26],ymm0[27],ymm2[27],ymm0[28],ymm2[28],ymm0[29],ymm2[29],ymm0[30],ymm2[30],ymm0[31],ymm2[31]
+; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm4 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
+; AVX512DQ-NEXT: vpsraw $4, %ymm4, %ymm5
+; AVX512DQ-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm4
+; AVX512DQ-NEXT: vpsraw $2, %ymm4, %ymm5
+; AVX512DQ-NEXT: vpaddw %ymm3, %ymm3, %ymm6
+; AVX512DQ-NEXT: vpblendvb %ymm6, %ymm5, %ymm4, %ymm4
+; AVX512DQ-NEXT: vpsraw $1, %ymm4, %ymm5
+; AVX512DQ-NEXT: vpaddw %ymm6, %ymm6, %ymm7
+; AVX512DQ-NEXT: vpblendvb %ymm7, %ymm5, %ymm4, %ymm4
+; AVX512DQ-NEXT: vpsrlw $8, %ymm4, %ymm4
+; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[4],ymm2[4],ymm0[5],ymm2[5],ymm0[6],ymm2[6],ymm0[7],ymm2[7],ymm0[16],ymm2[16],ymm0[17],ymm2[17],ymm0[18],ymm2[18],ymm0[19],ymm2[19],ymm0[20],ymm2[20],ymm0[21],ymm2[21],ymm0[22],ymm2[22],ymm0[23],ymm2[23]
+; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
+; AVX512DQ-NEXT: vpsraw $4, %ymm0, %ymm5
+; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm5, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsraw $2, %ymm0, %ymm5
+; AVX512DQ-NEXT: vpaddw %ymm2, %ymm2, %ymm8
+; AVX512DQ-NEXT: vpblendvb %ymm8, %ymm5, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsraw $1, %ymm0, %ymm5
+; AVX512DQ-NEXT: vpaddw %ymm8, %ymm8, %ymm9
+; AVX512DQ-NEXT: vpblendvb %ymm9, %ymm5, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsrlw $8, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpackuswb %ymm4, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm4 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
+; AVX512DQ-NEXT: vpsraw $4, %ymm4, %ymm5
+; AVX512DQ-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm3
+; AVX512DQ-NEXT: vpsraw $2, %ymm3, %ymm4
+; AVX512DQ-NEXT: vpblendvb %ymm6, %ymm4, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpsraw $1, %ymm3, %ymm4
+; AVX512DQ-NEXT: vpblendvb %ymm7, %ymm4, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpsrlw $8, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
+; AVX512DQ-NEXT: vpsraw $4, %ymm1, %ymm4
+; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm4, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpsraw $2, %ymm1, %ymm2
+; AVX512DQ-NEXT: vpblendvb %ymm8, %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpsraw $1, %ymm1, %ymm2
+; AVX512DQ-NEXT: vpblendvb %ymm9, %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpsrlw $8, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpackuswb %ymm3, %ymm1, %ymm1
+; AVX512DQ-NEXT: retq
%shift = ashr <64 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
ret <64 x i8> %shift
}
define <32 x i16> @splatconstant_shift_v32i16(<32 x i16> %a) nounwind {
-; ALL-LABEL: splatconstant_shift_v32i16:
-; ALL: ## BB#0:
-; ALL-NEXT: vpsraw $3, %ymm0, %ymm0
-; ALL-NEXT: vpsraw $3, %ymm1, %ymm1
-; ALL-NEXT: retq
+; AVX512DQ-LABEL: splatconstant_shift_v32i16:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vpsraw $3, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsraw $3, %ymm1, %ymm1
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: splatconstant_shift_v32i16:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpsraw $3, %zmm0, %zmm0
+; AVX512BW-NEXT: retq
%shift = ashr <32 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
ret <32 x i16> %shift
}
define <64 x i8> @splatconstant_shift_v64i8(<64 x i8> %a) nounwind {
-; ALL-LABEL: splatconstant_shift_v64i8:
-; ALL: ## BB#0:
-; ALL-NEXT: vpsrlw $3, %ymm0, %ymm0
-; ALL-NEXT: vmovdqa {{.*#+}} ymm2 = [31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31]
-; ALL-NEXT: vpand %ymm2, %ymm0, %ymm0
-; ALL-NEXT: vmovdqa {{.*#+}} ymm3 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; ALL-NEXT: vpxor %ymm3, %ymm0, %ymm0
-; ALL-NEXT: vpsubb %ymm3, %ymm0, %ymm0
-; ALL-NEXT: vpsrlw $3, %ymm1, %ymm1
-; ALL-NEXT: vpand %ymm2, %ymm1, %ymm1
-; ALL-NEXT: vpxor %ymm3, %ymm1, %ymm1
-; ALL-NEXT: vpsubb %ymm3, %ymm1, %ymm1
-; ALL-NEXT: retq
+; AVX512DQ-LABEL: splatconstant_shift_v64i8:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vpsrlw $3, %ymm0, %ymm0
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31]
+; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm3 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512DQ-NEXT: vpxor %ymm3, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsubb %ymm3, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsrlw $3, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpand %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpxor %ymm3, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpsubb %ymm3, %ymm1, %ymm1
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: splatconstant_shift_v64i8:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpsrlw $3, %zmm0, %zmm0
+; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512BW-NEXT: vpxorq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsubb %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: retq
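+; A rough scalar sketch of the BWI sequence above, per byte x with amount 3:
+; lacking a byte-granular arithmetic shift, it uses a word shift plus fix-up:
+;   t = (x >> 3) & 31;   // vpsrlw + vpandq: drop bits dragged in from the neighbour byte
+;   r = (t ^ 16) - 16;   // vpxorq + vpsubb: re-extend the sign from bit 4 (128 >> 3)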
%shift = ashr <64 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
ret <64 x i8> %shift
}
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX2
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
;
; Just one 32-bit run to make sure we do reasonable things for i64 shifts.
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=X32-SSE --check-prefix=X32-SSE2
; XOPAVX2-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: retq
;
+; AVX512-LABEL: var_shift_v2i64:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: retq
+;
; X32-SSE-LABEL: var_shift_v2i64:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
; XOPAVX2-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: retq
;
+; AVX512-LABEL: var_shift_v4i32:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: retq
+;
; X32-SSE-LABEL: var_shift_v4i32:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movdqa %xmm1, %xmm2
; XOP-NEXT: vpshlw %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
+; AVX512-LABEL: var_shift_v8i16:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
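+; The xmm operands above are implicitly the low 128 bits of zmm0/zmm1, so the
+; v8i16 shift can run as a full 512-bit vpsrlvw on this target (BWI without
+; VLX); the upper lanes are don't-care.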
+;
; X32-SSE-LABEL: var_shift_v8i16:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: psllw $12, %xmm1
; XOP-NEXT: vpshlb %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
+; AVX512-LABEL: var_shift_v16i8:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsllw $5, %xmm1, %xmm1
+; AVX512-NEXT: vpsrlw $4, %xmm0, %xmm2
+; AVX512-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX512-NEXT: vpsrlw $2, %xmm0, %xmm2
+; AVX512-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512-NEXT: vpaddb %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX512-NEXT: vpsrlw $1, %xmm0, %xmm2
+; AVX512-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512-NEXT: vpaddb %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX512-NEXT: retq
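+; With no variable per-byte shift available, the ladder above (shared with the
+; AVX2 lowering) applies the amount one bit at a time. A rough per-byte sketch,
+; after vpsllw $5 has moved amount bit 2 into each byte's sign bit for
+; vpblendvb:
+;   if (a & 4) x = (x >> 4) & 0x0f;
+;   if (a & 2) x = (x >> 2) & 0x3f;  // vpaddb doubles the mask, exposing the next bit
+;   if (a & 1) x = (x >> 1) & 0x7f;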
+;
; X32-SSE-LABEL: var_shift_v16i8:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: psllw $5, %xmm1
; XOP-NEXT: vpsrlq %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
+; AVX512-LABEL: splatvar_shift_v2i64:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsrlq %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: retq
+;
; X32-SSE-LABEL: splatvar_shift_v2i64:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movq {{.*#+}} xmm1 = xmm1[0],zero
; XOP-NEXT: vpsrld %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
+; AVX512-LABEL: splatvar_shift_v4i32:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; AVX512-NEXT: vmovss %xmm1, %xmm2, %xmm1
+; AVX512-NEXT: vpsrld %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: retq
+;
; X32-SSE-LABEL: splatvar_shift_v4i32:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: xorps %xmm2, %xmm2
; XOP-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
+; AVX512-LABEL: splatvar_shift_v8i16:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3,4,5,6,7]
+; AVX512-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: retq
+;
; X32-SSE-LABEL: splatvar_shift_v8i16:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movd %xmm1, %eax
; XOPAVX2-NEXT: vpshlb %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: retq
;
+; AVX512-LABEL: splatvar_shift_v16i8:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpbroadcastb %xmm1, %xmm1
+; AVX512-NEXT: vpsllw $5, %xmm1, %xmm1
+; AVX512-NEXT: vpsrlw $4, %xmm0, %xmm2
+; AVX512-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX512-NEXT: vpsrlw $2, %xmm0, %xmm2
+; AVX512-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512-NEXT: vpaddb %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX512-NEXT: vpsrlw $1, %xmm0, %xmm2
+; AVX512-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512-NEXT: vpaddb %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX512-NEXT: retq
+;
; X32-SSE-LABEL: splatvar_shift_v16i8:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; XOPAVX2-NEXT: vpsrlvq {{.*}}(%rip), %xmm0, %xmm0
; XOPAVX2-NEXT: retq
;
+; AVX512-LABEL: constant_shift_v2i64:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsrlvq {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: retq
+;
; X32-SSE-LABEL: constant_shift_v2i64:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; XOPAVX2-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
; XOPAVX2-NEXT: retq
;
+; AVX512-LABEL: constant_shift_v4i32:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: retq
+;
; X32-SSE-LABEL: constant_shift_v4i32:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; XOP-NEXT: vpshlw %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
+; AVX512-LABEL: constant_shift_v8i16:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7]
+; AVX512-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
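+; The 8 x i16 shift constant is materialized in xmm1 rather than folded; the
+; shift itself executes at zmm width, where a folded operand would be a
+; 512-bit load.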
+;
; X32-SSE-LABEL: constant_shift_v8i16:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; XOP-NEXT: vpshlb %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
+; AVX512-LABEL: constant_shift_v16i8:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
+; AVX512-NEXT: vpsllw $5, %xmm1, %xmm1
+; AVX512-NEXT: vpsrlw $4, %xmm0, %xmm2
+; AVX512-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX512-NEXT: vpsrlw $2, %xmm0, %xmm2
+; AVX512-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512-NEXT: vpaddb %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX512-NEXT: vpsrlw $1, %xmm0, %xmm2
+; AVX512-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512-NEXT: vpaddb %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX512-NEXT: retq
+;
; X32-SSE-LABEL: constant_shift_v16i8:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
; XOP-NEXT: vpsrlq $7, %xmm0, %xmm0
; XOP-NEXT: retq
;
+; AVX512-LABEL: splatconstant_shift_v2i64:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsrlq $7, %xmm0, %xmm0
+; AVX512-NEXT: retq
+;
; X32-SSE-LABEL: splatconstant_shift_v2i64:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: psrlq $7, %xmm0
; XOP-NEXT: vpsrld $5, %xmm0, %xmm0
; XOP-NEXT: retq
;
+; AVX512-LABEL: splatconstant_shift_v4i32:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsrld $5, %xmm0, %xmm0
+; AVX512-NEXT: retq
+;
; X32-SSE-LABEL: splatconstant_shift_v4i32:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: psrld $5, %xmm0
; XOP-NEXT: vpsrlw $3, %xmm0, %xmm0
; XOP-NEXT: retq
;
+; AVX512-LABEL: splatconstant_shift_v8i16:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsrlw $3, %xmm0, %xmm0
+; AVX512-NEXT: retq
+;
; X32-SSE-LABEL: splatconstant_shift_v8i16:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: psrlw $3, %xmm0
; XOP-NEXT: vpshlb %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
+; AVX512-LABEL: splatconstant_shift_v16i8:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsrlw $3, %xmm0, %xmm0
+; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: retq
+;
; X32-SSE-LABEL: splatconstant_shift_v16i8:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: psrlw $3, %xmm0
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX2
-
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
;
; Variable Shifts
;
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
+;
+; AVX512-LABEL: var_shift_v4i64:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: retq
%shift = lshr <4 x i64> %a, %b
ret <4 x i64> %shift
}
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
+;
+; AVX512-LABEL: var_shift_v8i32:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: retq
%shift = lshr <8 x i32> %a, %b
ret <8 x i32> %shift
}
; XOPAVX2-NEXT: vpshlw %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
+;
+; AVX512-LABEL: var_shift_v16i16:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
%shift = lshr <16 x i16> %a, %b
ret <16 x i16> %shift
}
; XOPAVX2-NEXT: vpshlb %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
+;
+; AVX512-LABEL: var_shift_v32i8:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsllw $5, %ymm1, %ymm1
+; AVX512-NEXT: vpsrlw $4, %ymm0, %ymm2
+; AVX512-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512-NEXT: vpsrlw $2, %ymm0, %ymm2
+; AVX512-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512-NEXT: vpaddb %ymm1, %ymm1, %ymm1
+; AVX512-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512-NEXT: vpsrlw $1, %ymm0, %ymm2
+; AVX512-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512-NEXT: vpaddb %ymm1, %ymm1, %ymm1
+; AVX512-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512-NEXT: retq
%shift = lshr <32 x i8> %a, %b
ret <32 x i8> %shift
}
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsrlq %xmm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
+;
+; AVX512-LABEL: splatvar_shift_v4i64:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsrlq %xmm1, %ymm0, %ymm0
+; AVX512-NEXT: retq
%splat = shufflevector <4 x i64> %b, <4 x i64> undef, <4 x i32> zeroinitializer
%shift = lshr <4 x i64> %a, %splat
ret <4 x i64> %shift
; XOPAVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3,4,5,6,7]
; XOPAVX2-NEXT: vpsrld %xmm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
+;
+; AVX512-LABEL: splatvar_shift_v8i32:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; AVX512-NEXT: vmovss %xmm1, %xmm2, %xmm1
+; AVX512-NEXT: vpsrld %xmm1, %ymm0, %ymm0
+; AVX512-NEXT: retq
%splat = shufflevector <8 x i32> %b, <8 x i32> undef, <8 x i32> zeroinitializer
%shift = lshr <8 x i32> %a, %splat
ret <8 x i32> %shift
; XOPAVX2-NEXT: vmovd %eax, %xmm1
; XOPAVX2-NEXT: vpsrlw %xmm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
+;
+; AVX512-LABEL: splatvar_shift_v16i16:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vmovd %xmm1, %eax
+; AVX512-NEXT: movzwl %ax, %eax
+; AVX512-NEXT: vmovd %eax, %xmm1
+; AVX512-NEXT: vpsrlw %xmm1, %ymm0, %ymm0
+; AVX512-NEXT: retq
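+; vpsrlw takes its count from the low 64 bits of the xmm register, so the
+; splatted word amount is zero-extended through %eax before use.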
%splat = shufflevector <16 x i16> %b, <16 x i16> undef, <16 x i32> zeroinitializer
%shift = lshr <16 x i16> %a, %splat
ret <16 x i16> %shift
; XOPAVX2-NEXT: vpshlb %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
+;
+; AVX512-LABEL: splatvar_shift_v32i8:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpbroadcastb %xmm1, %ymm1
+; AVX512-NEXT: vpsrlw $4, %ymm0, %ymm2
+; AVX512-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512-NEXT: vpsllw $5, %ymm1, %ymm1
+; AVX512-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512-NEXT: vpsrlw $2, %ymm0, %ymm2
+; AVX512-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512-NEXT: vpaddb %ymm1, %ymm1, %ymm1
+; AVX512-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512-NEXT: vpsrlw $1, %ymm0, %ymm2
+; AVX512-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512-NEXT: vpaddb %ymm1, %ymm1, %ymm1
+; AVX512-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512-NEXT: retq
%splat = shufflevector <32 x i8> %b, <32 x i8> undef, <32 x i32> zeroinitializer
%shift = lshr <32 x i8> %a, %splat
ret <32 x i8> %shift
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsrlvq {{.*}}(%rip), %ymm0, %ymm0
; XOPAVX2-NEXT: retq
+;
+; AVX512-LABEL: constant_shift_v4i64:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsrlvq {{.*}}(%rip), %ymm0, %ymm0
+; AVX512-NEXT: retq
%shift = lshr <4 x i64> %a, <i64 1, i64 7, i64 31, i64 62>
ret <4 x i64> %shift
}
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsrlvd {{.*}}(%rip), %ymm0, %ymm0
; XOPAVX2-NEXT: retq
+;
+; AVX512-LABEL: constant_shift_v8i32:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsrlvd {{.*}}(%rip), %ymm0, %ymm0
+; AVX512-NEXT: retq
%shift = lshr <8 x i32> %a, <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 8, i32 7>
ret <8 x i32> %shift
}
; XOPAVX2-NEXT: vpshlw %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
+;
+; AVX512-LABEL: constant_shift_v16i16:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
%shift = lshr <16 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
ret <16 x i16> %shift
}
; XOPAVX2-NEXT: vpshlb %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
+;
+; AVX512-LABEL: constant_shift_v32i8:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0,0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
+; AVX512-NEXT: vpsllw $5, %ymm1, %ymm1
+; AVX512-NEXT: vpsrlw $4, %ymm0, %ymm2
+; AVX512-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512-NEXT: vpsrlw $2, %ymm0, %ymm2
+; AVX512-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512-NEXT: vpaddb %ymm1, %ymm1, %ymm1
+; AVX512-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512-NEXT: vpsrlw $1, %ymm0, %ymm2
+; AVX512-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512-NEXT: vpaddb %ymm1, %ymm1, %ymm1
+; AVX512-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512-NEXT: retq
%shift = lshr <32 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
ret <32 x i8> %shift
}
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsrlq $7, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
+;
+; AVX512-LABEL: splatconstant_shift_v4i64:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsrlq $7, %ymm0, %ymm0
+; AVX512-NEXT: retq
%shift = lshr <4 x i64> %a, <i64 7, i64 7, i64 7, i64 7>
ret <4 x i64> %shift
}
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsrld $5, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
+;
+; AVX512-LABEL: splatconstant_shift_v8i32:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsrld $5, %ymm0, %ymm0
+; AVX512-NEXT: retq
%shift = lshr <8 x i32> %a, <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
ret <8 x i32> %shift
}
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsrlw $3, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
+;
+; AVX512-LABEL: splatconstant_shift_v16i16:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsrlw $3, %ymm0, %ymm0
+; AVX512-NEXT: retq
%shift = lshr <16 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
ret <16 x i16> %shift
}
; XOPAVX2-NEXT: vpsrlw $3, %ymm0, %ymm0
; XOPAVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; XOPAVX2-NEXT: retq
+;
+; AVX512-LABEL: splatconstant_shift_v32i8:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsrlw $3, %ymm0, %ymm0
+; AVX512-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX512-NEXT: retq
%shift = lshr <32 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
ret <32 x i8> %shift
}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; TODO: Add AVX512BW shift support
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512dq | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512DQ
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
;
; Variable Shifts
}
define <32 x i16> @var_shift_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
-; ALL-LABEL: var_shift_v32i16:
-; ALL: ## BB#0:
-; ALL-NEXT: vpxor %ymm4, %ymm4, %ymm4
-; ALL-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[12],ymm4[12],ymm2[13],ymm4[13],ymm2[14],ymm4[14],ymm2[15],ymm4[15]
-; ALL-NEXT: vpunpckhwd {{.*#+}} ymm6 = ymm0[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
-; ALL-NEXT: vpsrlvd %ymm5, %ymm6, %ymm5
-; ALL-NEXT: vpsrld $16, %ymm5, %ymm5
-; ALL-NEXT: vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[8],ymm4[8],ymm2[9],ymm4[9],ymm2[10],ymm4[10],ymm2[11],ymm4[11]
-; ALL-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; ALL-NEXT: vpsrlvd %ymm2, %ymm0, %ymm0
-; ALL-NEXT: vpsrld $16, %ymm0, %ymm0
-; ALL-NEXT: vpackusdw %ymm5, %ymm0, %ymm0
-; ALL-NEXT: vpunpckhwd {{.*#+}} ymm2 = ymm3[4],ymm4[4],ymm3[5],ymm4[5],ymm3[6],ymm4[6],ymm3[7],ymm4[7],ymm3[12],ymm4[12],ymm3[13],ymm4[13],ymm3[14],ymm4[14],ymm3[15],ymm4[15]
-; ALL-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm1[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
-; ALL-NEXT: vpsrlvd %ymm2, %ymm5, %ymm2
-; ALL-NEXT: vpsrld $16, %ymm2, %ymm2
-; ALL-NEXT: vpunpcklwd {{.*#+}} ymm3 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[8],ymm4[8],ymm3[9],ymm4[9],ymm3[10],ymm4[10],ymm3[11],ymm4[11]
-; ALL-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; ALL-NEXT: vpsrlvd %ymm3, %ymm1, %ymm1
-; ALL-NEXT: vpsrld $16, %ymm1, %ymm1
-; ALL-NEXT: vpackusdw %ymm2, %ymm1, %ymm1
-; ALL-NEXT: retq
+; AVX512DQ-LABEL: var_shift_v32i16:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vpxor %ymm4, %ymm4, %ymm4
+; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[12],ymm4[12],ymm2[13],ymm4[13],ymm2[14],ymm4[14],ymm2[15],ymm4[15]
+; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm6 = ymm0[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
+; AVX512DQ-NEXT: vpsrlvd %ymm5, %ymm6, %ymm5
+; AVX512DQ-NEXT: vpsrld $16, %ymm5, %ymm5
+; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[8],ymm4[8],ymm2[9],ymm4[9],ymm2[10],ymm4[10],ymm2[11],ymm4[11]
+; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512DQ-NEXT: vpsrlvd %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsrld $16, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpackusdw %ymm5, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm2 = ymm3[4],ymm4[4],ymm3[5],ymm4[5],ymm3[6],ymm4[6],ymm3[7],ymm4[7],ymm3[12],ymm4[12],ymm3[13],ymm4[13],ymm3[14],ymm4[14],ymm3[15],ymm4[15]
+; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm1[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
+; AVX512DQ-NEXT: vpsrlvd %ymm2, %ymm5, %ymm2
+; AVX512DQ-NEXT: vpsrld $16, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm3 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[8],ymm4[8],ymm3[9],ymm4[9],ymm3[10],ymm4[10],ymm3[11],ymm4[11]
+; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512DQ-NEXT: vpsrlvd %ymm3, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpsrld $16, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpackusdw %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: var_shift_v32i16:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: retq
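+; Without BWI each 16 x i16 half is zero-extended to dwords, shifted with
+; vpsrlvd and repacked, as in the AVX512DQ block above; with BWI the whole
+; v32i16 shift is a single vpsrlvw.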
%shift = lshr <32 x i16> %a, %b
ret <32 x i16> %shift
}
define <64 x i8> @var_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
-; ALL-LABEL: var_shift_v64i8:
-; ALL: ## BB#0:
-; ALL-NEXT: vpsrlw $4, %ymm0, %ymm4
-; ALL-NEXT: vmovdqa {{.*#+}} ymm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; ALL-NEXT: vpand %ymm5, %ymm4, %ymm4
-; ALL-NEXT: vpsllw $5, %ymm2, %ymm2
-; ALL-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
-; ALL-NEXT: vpsrlw $2, %ymm0, %ymm4
-; ALL-NEXT: vmovdqa {{.*#+}} ymm6 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
-; ALL-NEXT: vpand %ymm6, %ymm4, %ymm4
-; ALL-NEXT: vpaddb %ymm2, %ymm2, %ymm2
-; ALL-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
-; ALL-NEXT: vpsrlw $1, %ymm0, %ymm4
-; ALL-NEXT: vmovdqa {{.*#+}} ymm7 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; ALL-NEXT: vpand %ymm7, %ymm4, %ymm4
-; ALL-NEXT: vpaddb %ymm2, %ymm2, %ymm2
-; ALL-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
-; ALL-NEXT: vpsrlw $4, %ymm1, %ymm2
-; ALL-NEXT: vpand %ymm5, %ymm2, %ymm2
-; ALL-NEXT: vpsllw $5, %ymm3, %ymm3
-; ALL-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
-; ALL-NEXT: vpsrlw $2, %ymm1, %ymm2
-; ALL-NEXT: vpand %ymm6, %ymm2, %ymm2
-; ALL-NEXT: vpaddb %ymm3, %ymm3, %ymm3
-; ALL-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
-; ALL-NEXT: vpsrlw $1, %ymm1, %ymm2
-; ALL-NEXT: vpand %ymm7, %ymm2, %ymm2
-; ALL-NEXT: vpaddb %ymm3, %ymm3, %ymm3
-; ALL-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
-; ALL-NEXT: retq
+; AVX512DQ-LABEL: var_shift_v64i8:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vpsrlw $4, %ymm0, %ymm4
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512DQ-NEXT: vpand %ymm5, %ymm4, %ymm4
+; AVX512DQ-NEXT: vpsllw $5, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsrlw $2, %ymm0, %ymm4
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm6 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
+; AVX512DQ-NEXT: vpand %ymm6, %ymm4, %ymm4
+; AVX512DQ-NEXT: vpaddb %ymm2, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsrlw $1, %ymm0, %ymm4
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm7 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX512DQ-NEXT: vpand %ymm7, %ymm4, %ymm4
+; AVX512DQ-NEXT: vpaddb %ymm2, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsrlw $4, %ymm1, %ymm2
+; AVX512DQ-NEXT: vpand %ymm5, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpsllw $5, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpsrlw $2, %ymm1, %ymm2
+; AVX512DQ-NEXT: vpand %ymm6, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpaddb %ymm3, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpsrlw $1, %ymm1, %ymm2
+; AVX512DQ-NEXT: vpand %ymm7, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpaddb %ymm3, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: retq
%shift = lshr <64 x i8> %a, %b
ret <64 x i8> %shift
}
define <32 x i16> @splatvar_shift_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
-; ALL-LABEL: splatvar_shift_v32i16:
-; ALL: ## BB#0:
-; ALL-NEXT: vmovd %xmm2, %eax
-; ALL-NEXT: movzwl %ax, %eax
-; ALL-NEXT: vmovd %eax, %xmm2
-; ALL-NEXT: vpsrlw %xmm2, %ymm0, %ymm0
-; ALL-NEXT: vpsrlw %xmm2, %ymm1, %ymm1
-; ALL-NEXT: retq
+; AVX512DQ-LABEL: splatvar_shift_v32i16:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vmovd %xmm2, %eax
+; AVX512DQ-NEXT: movzwl %ax, %eax
+; AVX512DQ-NEXT: vmovd %eax, %xmm2
+; AVX512DQ-NEXT: vpsrlw %xmm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsrlw %xmm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: splatvar_shift_v32i16:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vmovd %xmm1, %eax
+; AVX512BW-NEXT: movzwl %ax, %eax
+; AVX512BW-NEXT: vmovd %eax, %xmm1
+; AVX512BW-NEXT: vpsrlw %xmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: retq
%splat = shufflevector <32 x i16> %b, <32 x i16> undef, <32 x i32> zeroinitializer
%shift = lshr <32 x i16> %a, %splat
ret <32 x i16> %shift
}
define <64 x i8> @splatvar_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
-; ALL-LABEL: splatvar_shift_v64i8:
-; ALL: ## BB#0:
-; ALL-NEXT: vpbroadcastb %xmm2, %ymm2
-; ALL-NEXT: vpsrlw $4, %ymm0, %ymm3
-; ALL-NEXT: vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; ALL-NEXT: vpand %ymm4, %ymm3, %ymm3
-; ALL-NEXT: vpsllw $5, %ymm2, %ymm2
-; ALL-NEXT: vpblendvb %ymm2, %ymm3, %ymm0, %ymm0
-; ALL-NEXT: vpsrlw $2, %ymm0, %ymm3
-; ALL-NEXT: vmovdqa {{.*#+}} ymm5 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
-; ALL-NEXT: vpand %ymm5, %ymm3, %ymm3
-; ALL-NEXT: vpaddb %ymm2, %ymm2, %ymm6
-; ALL-NEXT: vpblendvb %ymm6, %ymm3, %ymm0, %ymm0
-; ALL-NEXT: vpsrlw $1, %ymm0, %ymm3
-; ALL-NEXT: vmovdqa {{.*#+}} ymm7 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; ALL-NEXT: vpand %ymm7, %ymm3, %ymm3
-; ALL-NEXT: vpaddb %ymm6, %ymm6, %ymm8
-; ALL-NEXT: vpblendvb %ymm8, %ymm3, %ymm0, %ymm0
-; ALL-NEXT: vpsrlw $4, %ymm1, %ymm3
-; ALL-NEXT: vpand %ymm4, %ymm3, %ymm3
-; ALL-NEXT: vpblendvb %ymm2, %ymm3, %ymm1, %ymm1
-; ALL-NEXT: vpsrlw $2, %ymm1, %ymm2
-; ALL-NEXT: vpand %ymm5, %ymm2, %ymm2
-; ALL-NEXT: vpblendvb %ymm6, %ymm2, %ymm1, %ymm1
-; ALL-NEXT: vpsrlw $1, %ymm1, %ymm2
-; ALL-NEXT: vpand %ymm7, %ymm2, %ymm2
-; ALL-NEXT: vpblendvb %ymm8, %ymm2, %ymm1, %ymm1
-; ALL-NEXT: retq
+; AVX512DQ-LABEL: splatvar_shift_v64i8:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vpbroadcastb %xmm2, %ymm2
+; AVX512DQ-NEXT: vpsrlw $4, %ymm0, %ymm3
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512DQ-NEXT: vpand %ymm4, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpsllw $5, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm3, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsrlw $2, %ymm0, %ymm3
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm5 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
+; AVX512DQ-NEXT: vpand %ymm5, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpaddb %ymm2, %ymm2, %ymm6
+; AVX512DQ-NEXT: vpblendvb %ymm6, %ymm3, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsrlw $1, %ymm0, %ymm3
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm7 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX512DQ-NEXT: vpand %ymm7, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpaddb %ymm6, %ymm6, %ymm8
+; AVX512DQ-NEXT: vpblendvb %ymm8, %ymm3, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsrlw $4, %ymm1, %ymm3
+; AVX512DQ-NEXT: vpand %ymm4, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm3, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpsrlw $2, %ymm1, %ymm2
+; AVX512DQ-NEXT: vpand %ymm5, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpblendvb %ymm6, %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpsrlw $1, %ymm1, %ymm2
+; AVX512DQ-NEXT: vpand %ymm7, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpblendvb %ymm8, %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: retq
%splat = shufflevector <64 x i8> %b, <64 x i8> undef, <64 x i32> zeroinitializer
%shift = lshr <64 x i8> %a, %splat
ret <64 x i8> %shift
}
define <32 x i16> @constant_shift_v32i16(<32 x i16> %a) nounwind {
-; ALL-LABEL: constant_shift_v32i16:
-; ALL: ## BB#0:
-; ALL-NEXT: vpxor %ymm2, %ymm2, %ymm2
-; ALL-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
-; ALL-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm3[4],ymm2[4],ymm3[5],ymm2[5],ymm3[6],ymm2[6],ymm3[7],ymm2[7],ymm3[12],ymm2[12],ymm3[13],ymm2[13],ymm3[14],ymm2[14],ymm3[15],ymm2[15]
-; ALL-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm0[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
-; ALL-NEXT: vpsrlvd %ymm4, %ymm5, %ymm5
-; ALL-NEXT: vpsrld $16, %ymm5, %ymm5
-; ALL-NEXT: vpunpcklwd {{.*#+}} ymm2 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[8],ymm2[8],ymm3[9],ymm2[9],ymm3[10],ymm2[10],ymm3[11],ymm2[11]
-; ALL-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; ALL-NEXT: vpsrlvd %ymm2, %ymm0, %ymm0
-; ALL-NEXT: vpsrld $16, %ymm0, %ymm0
-; ALL-NEXT: vpackusdw %ymm5, %ymm0, %ymm0
-; ALL-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
-; ALL-NEXT: vpsrlvd %ymm4, %ymm3, %ymm3
-; ALL-NEXT: vpsrld $16, %ymm3, %ymm3
-; ALL-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; ALL-NEXT: vpsrlvd %ymm2, %ymm1, %ymm1
-; ALL-NEXT: vpsrld $16, %ymm1, %ymm1
-; ALL-NEXT: vpackusdw %ymm3, %ymm1, %ymm1
-; ALL-NEXT: retq
+; AVX512DQ-LABEL: constant_shift_v32i16:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vpxor %ymm2, %ymm2, %ymm2
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm3[4],ymm2[4],ymm3[5],ymm2[5],ymm3[6],ymm2[6],ymm3[7],ymm2[7],ymm3[12],ymm2[12],ymm3[13],ymm2[13],ymm3[14],ymm2[14],ymm3[15],ymm2[15]
+; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm0[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
+; AVX512DQ-NEXT: vpsrlvd %ymm4, %ymm5, %ymm5
+; AVX512DQ-NEXT: vpsrld $16, %ymm5, %ymm5
+; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm2 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[8],ymm2[8],ymm3[9],ymm2[9],ymm3[10],ymm2[10],ymm3[11],ymm2[11]
+; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512DQ-NEXT: vpsrlvd %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsrld $16, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpackusdw %ymm5, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
+; AVX512DQ-NEXT: vpsrlvd %ymm4, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpsrld $16, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512DQ-NEXT: vpsrlvd %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpsrld $16, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpackusdw %ymm3, %ymm1, %ymm1
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: constant_shift_v32i16:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpsrlvw {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT: retq
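+; With BWI the per-element shift constant also folds straight into the
+; vpsrlvw memory operand.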
%shift = lshr <32 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
ret <32 x i16> %shift
}
define <64 x i8> @constant_shift_v64i8(<64 x i8> %a) nounwind {
-; ALL-LABEL: constant_shift_v64i8:
-; ALL: ## BB#0:
-; ALL-NEXT: vpsrlw $4, %ymm0, %ymm2
-; ALL-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; ALL-NEXT: vpand %ymm3, %ymm2, %ymm2
-; ALL-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0,0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
-; ALL-NEXT: vpsllw $5, %ymm4, %ymm4
-; ALL-NEXT: vpblendvb %ymm4, %ymm2, %ymm0, %ymm0
-; ALL-NEXT: vpsrlw $2, %ymm0, %ymm2
-; ALL-NEXT: vmovdqa {{.*#+}} ymm5 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
-; ALL-NEXT: vpand %ymm5, %ymm2, %ymm2
-; ALL-NEXT: vpaddb %ymm4, %ymm4, %ymm6
-; ALL-NEXT: vpblendvb %ymm6, %ymm2, %ymm0, %ymm0
-; ALL-NEXT: vpsrlw $1, %ymm0, %ymm2
-; ALL-NEXT: vmovdqa {{.*#+}} ymm7 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; ALL-NEXT: vpand %ymm7, %ymm2, %ymm2
-; ALL-NEXT: vpaddb %ymm6, %ymm6, %ymm8
-; ALL-NEXT: vpblendvb %ymm8, %ymm2, %ymm0, %ymm0
-; ALL-NEXT: vpsrlw $4, %ymm1, %ymm2
-; ALL-NEXT: vpand %ymm3, %ymm2, %ymm2
-; ALL-NEXT: vpblendvb %ymm4, %ymm2, %ymm1, %ymm1
-; ALL-NEXT: vpsrlw $2, %ymm1, %ymm2
-; ALL-NEXT: vpand %ymm5, %ymm2, %ymm2
-; ALL-NEXT: vpblendvb %ymm6, %ymm2, %ymm1, %ymm1
-; ALL-NEXT: vpsrlw $1, %ymm1, %ymm2
-; ALL-NEXT: vpand %ymm7, %ymm2, %ymm2
-; ALL-NEXT: vpblendvb %ymm8, %ymm2, %ymm1, %ymm1
-; ALL-NEXT: retq
+; AVX512DQ-LABEL: constant_shift_v64i8:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vpsrlw $4, %ymm0, %ymm2
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512DQ-NEXT: vpand %ymm3, %ymm2, %ymm2
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0,0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
+; AVX512DQ-NEXT: vpsllw $5, %ymm4, %ymm4
+; AVX512DQ-NEXT: vpblendvb %ymm4, %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsrlw $2, %ymm0, %ymm2
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm5 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
+; AVX512DQ-NEXT: vpand %ymm5, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpaddb %ymm4, %ymm4, %ymm6
+; AVX512DQ-NEXT: vpblendvb %ymm6, %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsrlw $1, %ymm0, %ymm2
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm7 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX512DQ-NEXT: vpand %ymm7, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpaddb %ymm6, %ymm6, %ymm8
+; AVX512DQ-NEXT: vpblendvb %ymm8, %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsrlw $4, %ymm1, %ymm2
+; AVX512DQ-NEXT: vpand %ymm3, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpblendvb %ymm4, %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpsrlw $2, %ymm1, %ymm2
+; AVX512DQ-NEXT: vpand %ymm5, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpblendvb %ymm6, %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpsrlw $1, %ymm1, %ymm2
+; AVX512DQ-NEXT: vpand %ymm7, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpblendvb %ymm8, %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: retq
%shift = lshr <64 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
ret <64 x i8> %shift
}
define <32 x i16> @splatconstant_shift_v32i16(<32 x i16> %a) nounwind {
-; ALL-LABEL: splatconstant_shift_v32i16:
-; ALL: ## BB#0:
-; ALL-NEXT: vpsrlw $3, %ymm0, %ymm0
-; ALL-NEXT: vpsrlw $3, %ymm1, %ymm1
-; ALL-NEXT: retq
+; AVX512DQ-LABEL: splatconstant_shift_v32i16:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vpsrlw $3, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsrlw $3, %ymm1, %ymm1
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: splatconstant_shift_v32i16:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpsrlw $3, %zmm0, %zmm0
+; AVX512BW-NEXT: retq
%shift = lshr <32 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
ret <32 x i16> %shift
}
define <64 x i8> @splatconstant_shift_v64i8(<64 x i8> %a) nounwind {
-; ALL-LABEL: splatconstant_shift_v64i8:
-; ALL: ## BB#0:
-; ALL-NEXT: vpsrlw $3, %ymm0, %ymm0
-; ALL-NEXT: vmovdqa {{.*#+}} ymm2 = [31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31]
-; ALL-NEXT: vpand %ymm2, %ymm0, %ymm0
-; ALL-NEXT: vpsrlw $3, %ymm1, %ymm1
-; ALL-NEXT: vpand %ymm2, %ymm1, %ymm1
-; ALL-NEXT: retq
+; AVX512DQ-LABEL: splatconstant_shift_v64i8:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vpsrlw $3, %ymm0, %ymm0
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31]
+; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsrlw $3, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpand %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: splatconstant_shift_v64i8:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpsrlw $3, %zmm0, %zmm0
+; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT: retq
%shift = lshr <64 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
ret <64 x i8> %shift
}
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX2
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
;
; Just one 32-bit run to make sure we do reasonable things for i64 shifts.
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=X32-SSE --check-prefix=X32-SSE2
; XOPAVX2-NEXT: vpsllvq %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: retq
;
+; AVX512-LABEL: var_shift_v2i64:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsllvq %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: retq
+;
; X32-SSE-LABEL: var_shift_v2i64:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
; XOPAVX2-NEXT: vpsllvd %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: retq
;
+; AVX512-LABEL: var_shift_v4i32:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsllvd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: retq
+;
; X32-SSE-LABEL: var_shift_v4i32:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: pslld $23, %xmm1
; XOP-NEXT: vpshlw %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
+; AVX512-LABEL: var_shift_v8i16:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsllvw %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
+;
; X32-SSE-LABEL: var_shift_v8i16:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: psllw $12, %xmm1
; XOP-NEXT: vpshlb %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
+; AVX512-LABEL: var_shift_v16i8:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsllw $5, %xmm1, %xmm1
+; AVX512-NEXT: vpsllw $4, %xmm0, %xmm2
+; AVX512-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX512-NEXT: vpsllw $2, %xmm0, %xmm2
+; AVX512-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512-NEXT: vpaddb %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX512-NEXT: vpaddb %xmm0, %xmm0, %xmm2
+; AVX512-NEXT: vpaddb %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX512-NEXT: retq
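+; The shl ladder mirrors the lshr one; its final conditional step needs no
+; mask load because vpaddb %xmm0, %xmm0 is itself the byte shift-left-by-one.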
+;
; X32-SSE-LABEL: var_shift_v16i8:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: psllw $5, %xmm1
; XOP-NEXT: vpsllq %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
+; AVX512-LABEL: splatvar_shift_v2i64:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsllq %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: retq
+;
; X32-SSE-LABEL: splatvar_shift_v2i64:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movq {{.*#+}} xmm1 = xmm1[0],zero
; XOP-NEXT: vpslld %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
+; AVX512-LABEL: splatvar_shift_v4i32:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; AVX512-NEXT: vmovss %xmm1, %xmm2, %xmm1
+; AVX512-NEXT: vpslld %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: retq
+;
; X32-SSE-LABEL: splatvar_shift_v4i32:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: xorps %xmm2, %xmm2
; XOP-NEXT: vpsllw %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
+; AVX512-LABEL: splatvar_shift_v8i16:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3,4,5,6,7]
+; AVX512-NEXT: vpsllw %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: retq
+;
; X32-SSE-LABEL: splatvar_shift_v8i16:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movd %xmm1, %eax
; XOPAVX2-NEXT: vpshlb %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: retq
;
+; AVX512-LABEL: splatvar_shift_v16i8:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpbroadcastb %xmm1, %xmm1
+; AVX512-NEXT: vpsllw $5, %xmm1, %xmm1
+; AVX512-NEXT: vpsllw $4, %xmm0, %xmm2
+; AVX512-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX512-NEXT: vpsllw $2, %xmm0, %xmm2
+; AVX512-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512-NEXT: vpaddb %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX512-NEXT: vpaddb %xmm0, %xmm0, %xmm2
+; AVX512-NEXT: vpaddb %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX512-NEXT: retq
+;
; X32-SSE-LABEL: splatvar_shift_v16i8:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; XOPAVX2-NEXT: vpsllvq {{.*}}(%rip), %xmm0, %xmm0
; XOPAVX2-NEXT: retq
;
+; AVX512-LABEL: constant_shift_v2i64:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsllvq {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: retq
+;
; X32-SSE-LABEL: constant_shift_v2i64:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; XOPAVX2-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; XOPAVX2-NEXT: retq
;
+; AVX512-LABEL: constant_shift_v4i32:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: retq
+;
; X32-SSE-LABEL: constant_shift_v4i32:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movdqa {{.*#+}} xmm1 = [16,32,64,128]
; XOP-NEXT: vpshlw {{.*}}(%rip), %xmm0, %xmm0
; XOP-NEXT: retq
;
+; AVX512-LABEL: constant_shift_v8i16:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7]
+; AVX512-NEXT: vpsllvw %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
+;
; X32-SSE-LABEL: constant_shift_v8i16:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: pmullw .LCPI10_0, %xmm0
; XOP-NEXT: vpshlb {{.*}}(%rip), %xmm0, %xmm0
; XOP-NEXT: retq
;
+; AVX512-LABEL: constant_shift_v16i8:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
+; AVX512-NEXT: vpsllw $5, %xmm1, %xmm1
+; AVX512-NEXT: vpsllw $4, %xmm0, %xmm2
+; AVX512-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX512-NEXT: vpsllw $2, %xmm0, %xmm2
+; AVX512-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512-NEXT: vpaddb %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX512-NEXT: vpaddb %xmm0, %xmm0, %xmm2
+; AVX512-NEXT: vpaddb %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX512-NEXT: retq
+;
; X32-SSE-LABEL: constant_shift_v16i8:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
; XOP-NEXT: vpsllq $7, %xmm0, %xmm0
; XOP-NEXT: retq
;
+; AVX512-LABEL: splatconstant_shift_v2i64:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsllq $7, %xmm0, %xmm0
+; AVX512-NEXT: retq
+;
; X32-SSE-LABEL: splatconstant_shift_v2i64:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: psllq $7, %xmm0
; XOP-NEXT: vpslld $5, %xmm0, %xmm0
; XOP-NEXT: retq
;
+; AVX512-LABEL: splatconstant_shift_v4i32:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpslld $5, %xmm0, %xmm0
+; AVX512-NEXT: retq
+;
; X32-SSE-LABEL: splatconstant_shift_v4i32:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: pslld $5, %xmm0
; XOP-NEXT: vpsllw $3, %xmm0, %xmm0
; XOP-NEXT: retq
;
+; AVX512-LABEL: splatconstant_shift_v8i16:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsllw $3, %xmm0, %xmm0
+; AVX512-NEXT: retq
+;
; X32-SSE-LABEL: splatconstant_shift_v8i16:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: psllw $3, %xmm0
; XOP-NEXT: vpshlb {{.*}}(%rip), %xmm0, %xmm0
; XOP-NEXT: retq
;
+; AVX512-LABEL: splatconstant_shift_v16i8:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsllw $3, %xmm0, %xmm0
+; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: retq
+;
; X32-SSE-LABEL: splatconstant_shift_v16i8:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: psllw $3, %xmm0
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX2
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
;
; Variable Shifts
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsllvq %ymm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
+;
+; AVX512-LABEL: var_shift_v4i64:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsllvq %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: retq
%shift = shl <4 x i64> %a, %b
ret <4 x i64> %shift
}
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
+;
+; AVX512-LABEL: var_shift_v8i32:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: retq
%shift = shl <8 x i32> %a, %b
ret <8 x i32> %shift
}
; XOPAVX2-NEXT: vpshlw %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
+;
+; AVX512-LABEL: var_shift_v16i16:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsllvw %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
%shift = shl <16 x i16> %a, %b
ret <16 x i16> %shift
}
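+; A minimal standalone reproduction of the AVX512 block above (a sketch with
+; a hypothetical function name, not part of this test):
+;
+;   define <16 x i16> @shl_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
+;     %s = shl <16 x i16> %a, %b
+;     ret <16 x i16> %s
+;   }
+;
+; Compiled with llc -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512bw,
+; this should emit the single vpsllvw %zmm1, %zmm0, %zmm0 seen above; the ymm
+; operands are used as the low halves of zmm registers since KNL lacks VLX.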
; XOPAVX2-NEXT: vpshlb %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
+;
+; AVX512-LABEL: var_shift_v32i8:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsllw $5, %ymm1, %ymm1
+; AVX512-NEXT: vpsllw $4, %ymm0, %ymm2
+; AVX512-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512-NEXT: vpsllw $2, %ymm0, %ymm2
+; AVX512-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512-NEXT: vpaddb %ymm1, %ymm1, %ymm1
+; AVX512-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512-NEXT: vpaddb %ymm0, %ymm0, %ymm2
+; AVX512-NEXT: vpaddb %ymm1, %ymm1, %ymm1
+; AVX512-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512-NEXT: retq
%shift = shl <32 x i8> %a, %b
ret <32 x i8> %shift
}
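+; NOTE: AVX512BW adds no variable byte shift (vpsllvw is the narrowest, at
+; word granularity), so v32i8 keeps the vpsllw/vpblendvb expansion that AVX2
+; already used.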
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsllq %xmm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
+;
+; AVX512-LABEL: splatvar_shift_v4i64:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsllq %xmm1, %ymm0, %ymm0
+; AVX512-NEXT: retq
%splat = shufflevector <4 x i64> %b, <4 x i64> undef, <4 x i32> zeroinitializer
%shift = shl <4 x i64> %a, %splat
ret <4 x i64> %shift
; XOPAVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3,4,5,6,7]
; XOPAVX2-NEXT: vpslld %xmm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
+;
+; AVX512-LABEL: splatvar_shift_v8i32:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; AVX512-NEXT: vmovss %xmm1, %xmm2, %xmm1
+; AVX512-NEXT: vpslld %xmm1, %ymm0, %ymm0
+; AVX512-NEXT: retq
%splat = shufflevector <8 x i32> %b, <8 x i32> undef, <8 x i32> zeroinitializer
%shift = shl <8 x i32> %a, %splat
ret <8 x i32> %shift
; XOPAVX2-NEXT: vmovd %eax, %xmm1
; XOPAVX2-NEXT: vpsllw %xmm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
+;
+; AVX512-LABEL: splatvar_shift_v16i16:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vmovd %xmm1, %eax
+; AVX512-NEXT: movzwl %ax, %eax
+; AVX512-NEXT: vmovd %eax, %xmm1
+; AVX512-NEXT: vpsllw %xmm1, %ymm0, %ymm0
+; AVX512-NEXT: retq
%splat = shufflevector <16 x i16> %b, <16 x i16> undef, <16 x i32> zeroinitializer
%shift = shl <16 x i16> %a, %splat
ret <16 x i16> %shift
; XOPAVX2-NEXT: vpshlb %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
+;
+; AVX512-LABEL: splatvar_shift_v32i8:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpbroadcastb %xmm1, %ymm1
+; AVX512-NEXT: vpsllw $4, %ymm0, %ymm2
+; AVX512-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512-NEXT: vpsllw $5, %ymm1, %ymm1
+; AVX512-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512-NEXT: vpsllw $2, %ymm0, %ymm2
+; AVX512-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512-NEXT: vpaddb %ymm1, %ymm1, %ymm1
+; AVX512-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512-NEXT: vpaddb %ymm0, %ymm0, %ymm2
+; AVX512-NEXT: vpaddb %ymm1, %ymm1, %ymm1
+; AVX512-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512-NEXT: retq
%splat = shufflevector <32 x i8> %b, <32 x i8> undef, <32 x i32> zeroinitializer
%shift = shl <32 x i8> %a, %splat
ret <32 x i8> %shift
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsllvq {{.*}}(%rip), %ymm0, %ymm0
; XOPAVX2-NEXT: retq
+;
+; AVX512-LABEL: constant_shift_v4i64:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsllvq {{.*}}(%rip), %ymm0, %ymm0
+; AVX512-NEXT: retq
%shift = shl <4 x i64> %a, <i64 1, i64 7, i64 31, i64 62>
ret <4 x i64> %shift
}
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsllvd {{.*}}(%rip), %ymm0, %ymm0
; XOPAVX2-NEXT: retq
+;
+; AVX512-LABEL: constant_shift_v8i32:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsllvd {{.*}}(%rip), %ymm0, %ymm0
+; AVX512-NEXT: retq
%shift = shl <8 x i32> %a, <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 8, i32 7>
ret <8 x i32> %shift
}
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; XOPAVX2-NEXT: retq
+;
+; AVX512-LABEL: constant_shift_v16i16:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512-NEXT: vpsllvw %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
%shift = shl <16 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
ret <16 x i16> %shift
}
; XOPAVX2-NEXT: vpshlb %xmm2, %xmm0, %xmm0
; XOPAVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
+;
+; AVX512-LABEL: constant_shift_v32i8:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0,0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
+; AVX512-NEXT: vpsllw $5, %ymm1, %ymm1
+; AVX512-NEXT: vpsllw $4, %ymm0, %ymm2
+; AVX512-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512-NEXT: vpsllw $2, %ymm0, %ymm2
+; AVX512-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512-NEXT: vpaddb %ymm1, %ymm1, %ymm1
+; AVX512-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512-NEXT: vpaddb %ymm0, %ymm0, %ymm2
+; AVX512-NEXT: vpaddb %ymm1, %ymm1, %ymm1
+; AVX512-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512-NEXT: retq
%shift = shl <32 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
ret <32 x i8> %shift
}
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsllq $7, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
+;
+; AVX512-LABEL: splatconstant_shift_v4i64:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsllq $7, %ymm0, %ymm0
+; AVX512-NEXT: retq
%shift = shl <4 x i64> %a, <i64 7, i64 7, i64 7, i64 7>
ret <4 x i64> %shift
}
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpslld $5, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
+;
+; AVX512-LABEL: splatconstant_shift_v8i32:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpslld $5, %ymm0, %ymm0
+; AVX512-NEXT: retq
%shift = shl <8 x i32> %a, <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
ret <8 x i32> %shift
}
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsllw $3, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
+;
+; AVX512-LABEL: splatconstant_shift_v16i16:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsllw $3, %ymm0, %ymm0
+; AVX512-NEXT: retq
%shift = shl <16 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
ret <16 x i16> %shift
}
; XOPAVX2-NEXT: vpsllw $3, %ymm0, %ymm0
; XOPAVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; XOPAVX2-NEXT: retq
+;
+; AVX512-LABEL: splatconstant_shift_v32i8:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsllw $3, %ymm0, %ymm0
+; AVX512-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX512-NEXT: retq
%shift = shl <32 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
ret <32 x i8> %shift
}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; TODO: Add AVX512BW shift support
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512dq | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512DQ
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
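+; NOTE: With a second RUN configuration the two targets no longer agree on
+; every function, so the shared ALL check blocks below are split into
+; AVX512DQ and AVX512BW variants wherever the output differs.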
;
; Variable Shifts
}
define <32 x i16> @var_shift_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
-; ALL-LABEL: var_shift_v32i16:
-; ALL: ## BB#0:
-; ALL-NEXT: vpxor %ymm4, %ymm4, %ymm4
-; ALL-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[12],ymm4[12],ymm2[13],ymm4[13],ymm2[14],ymm4[14],ymm2[15],ymm4[15]
-; ALL-NEXT: vpunpckhwd {{.*#+}} ymm6 = ymm0[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
-; ALL-NEXT: vpsllvd %ymm5, %ymm6, %ymm5
-; ALL-NEXT: vpsrld $16, %ymm5, %ymm5
-; ALL-NEXT: vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[8],ymm4[8],ymm2[9],ymm4[9],ymm2[10],ymm4[10],ymm2[11],ymm4[11]
-; ALL-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; ALL-NEXT: vpsllvd %ymm2, %ymm0, %ymm0
-; ALL-NEXT: vpsrld $16, %ymm0, %ymm0
-; ALL-NEXT: vpackusdw %ymm5, %ymm0, %ymm0
-; ALL-NEXT: vpunpckhwd {{.*#+}} ymm2 = ymm3[4],ymm4[4],ymm3[5],ymm4[5],ymm3[6],ymm4[6],ymm3[7],ymm4[7],ymm3[12],ymm4[12],ymm3[13],ymm4[13],ymm3[14],ymm4[14],ymm3[15],ymm4[15]
-; ALL-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm1[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
-; ALL-NEXT: vpsllvd %ymm2, %ymm5, %ymm2
-; ALL-NEXT: vpsrld $16, %ymm2, %ymm2
-; ALL-NEXT: vpunpcklwd {{.*#+}} ymm3 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[8],ymm4[8],ymm3[9],ymm4[9],ymm3[10],ymm4[10],ymm3[11],ymm4[11]
-; ALL-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; ALL-NEXT: vpsllvd %ymm3, %ymm1, %ymm1
-; ALL-NEXT: vpsrld $16, %ymm1, %ymm1
-; ALL-NEXT: vpackusdw %ymm2, %ymm1, %ymm1
-; ALL-NEXT: retq
+; AVX512DQ-LABEL: var_shift_v32i16:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vpxor %ymm4, %ymm4, %ymm4
+; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[12],ymm4[12],ymm2[13],ymm4[13],ymm2[14],ymm4[14],ymm2[15],ymm4[15]
+; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm6 = ymm0[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
+; AVX512DQ-NEXT: vpsllvd %ymm5, %ymm6, %ymm5
+; AVX512DQ-NEXT: vpsrld $16, %ymm5, %ymm5
+; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[8],ymm4[8],ymm2[9],ymm4[9],ymm2[10],ymm4[10],ymm2[11],ymm4[11]
+; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512DQ-NEXT: vpsllvd %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsrld $16, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpackusdw %ymm5, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm2 = ymm3[4],ymm4[4],ymm3[5],ymm4[5],ymm3[6],ymm4[6],ymm3[7],ymm4[7],ymm3[12],ymm4[12],ymm3[13],ymm4[13],ymm3[14],ymm4[14],ymm3[15],ymm4[15]
+; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm1[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
+; AVX512DQ-NEXT: vpsllvd %ymm2, %ymm5, %ymm2
+; AVX512DQ-NEXT: vpsrld $16, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm3 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[8],ymm4[8],ymm3[9],ymm4[9],ymm3[10],ymm4[10],ymm3[11],ymm4[11]
+; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512DQ-NEXT: vpsllvd %ymm3, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpsrld $16, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpackusdw %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: var_shift_v32i16:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: retq
%shift = shl <32 x i16> %a, %b
ret <32 x i16> %shift
}
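+; NOTE: The AVX512DQ expansion above zero-extends each word to a dword,
+; shifts with vpsllvd, and repacks with vpackusdw; AVX512BW collapses all of
+; that into one native vpsllvw on zmm.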
define <64 x i8> @var_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
-; ALL-LABEL: var_shift_v64i8:
-; ALL: ## BB#0:
-; ALL-NEXT: vpsllw $4, %ymm0, %ymm4
-; ALL-NEXT: vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; ALL-NEXT: vpand %ymm5, %ymm4, %ymm4
-; ALL-NEXT: vpsllw $5, %ymm2, %ymm2
-; ALL-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
-; ALL-NEXT: vpsllw $2, %ymm0, %ymm4
-; ALL-NEXT: vmovdqa {{.*#+}} ymm6 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
-; ALL-NEXT: vpand %ymm6, %ymm4, %ymm4
-; ALL-NEXT: vpaddb %ymm2, %ymm2, %ymm2
-; ALL-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
-; ALL-NEXT: vpaddb %ymm0, %ymm0, %ymm4
-; ALL-NEXT: vpaddb %ymm2, %ymm2, %ymm2
-; ALL-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
-; ALL-NEXT: vpsllw $4, %ymm1, %ymm2
-; ALL-NEXT: vpand %ymm5, %ymm2, %ymm2
-; ALL-NEXT: vpsllw $5, %ymm3, %ymm3
-; ALL-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
-; ALL-NEXT: vpsllw $2, %ymm1, %ymm2
-; ALL-NEXT: vpand %ymm6, %ymm2, %ymm2
-; ALL-NEXT: vpaddb %ymm3, %ymm3, %ymm3
-; ALL-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
-; ALL-NEXT: vpaddb %ymm1, %ymm1, %ymm2
-; ALL-NEXT: vpaddb %ymm3, %ymm3, %ymm3
-; ALL-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
-; ALL-NEXT: retq
+; AVX512DQ-LABEL: var_shift_v64i8:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vpsllw $4, %ymm0, %ymm4
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512DQ-NEXT: vpand %ymm5, %ymm4, %ymm4
+; AVX512DQ-NEXT: vpsllw $5, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsllw $2, %ymm0, %ymm4
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm6 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX512DQ-NEXT: vpand %ymm6, %ymm4, %ymm4
+; AVX512DQ-NEXT: vpaddb %ymm2, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpaddb %ymm0, %ymm0, %ymm4
+; AVX512DQ-NEXT: vpaddb %ymm2, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsllw $4, %ymm1, %ymm2
+; AVX512DQ-NEXT: vpand %ymm5, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpsllw $5, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpsllw $2, %ymm1, %ymm2
+; AVX512DQ-NEXT: vpand %ymm6, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpaddb %ymm3, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpaddb %ymm1, %ymm1, %ymm2
+; AVX512DQ-NEXT: vpaddb %ymm3, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: retq
%shift = shl <64 x i8> %a, %b
ret <64 x i8> %shift
}
}
define <32 x i16> @splatvar_shift_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
-; ALL-LABEL: splatvar_shift_v32i16:
-; ALL: ## BB#0:
-; ALL-NEXT: vmovd %xmm2, %eax
-; ALL-NEXT: movzwl %ax, %eax
-; ALL-NEXT: vmovd %eax, %xmm2
-; ALL-NEXT: vpsllw %xmm2, %ymm0, %ymm0
-; ALL-NEXT: vpsllw %xmm2, %ymm1, %ymm1
-; ALL-NEXT: retq
+; AVX512DQ-LABEL: splatvar_shift_v32i16:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vmovd %xmm2, %eax
+; AVX512DQ-NEXT: movzwl %ax, %eax
+; AVX512DQ-NEXT: vmovd %eax, %xmm2
+; AVX512DQ-NEXT: vpsllw %xmm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsllw %xmm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: splatvar_shift_v32i16:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vmovd %xmm1, %eax
+; AVX512BW-NEXT: movzwl %ax, %eax
+; AVX512BW-NEXT: vmovd %eax, %xmm1
+; AVX512BW-NEXT: vpsllw %xmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: retq
%splat = shufflevector <32 x i16> %b, <32 x i16> undef, <32 x i32> zeroinitializer
%shift = shl <32 x i16> %a, %splat
ret <32 x i16> %shift
}
define <64 x i8> @splatvar_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
-; ALL-LABEL: splatvar_shift_v64i8:
-; ALL: ## BB#0:
-; ALL-NEXT: vpbroadcastb %xmm2, %ymm2
-; ALL-NEXT: vpsllw $4, %ymm0, %ymm3
-; ALL-NEXT: vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; ALL-NEXT: vpand %ymm4, %ymm3, %ymm3
-; ALL-NEXT: vpsllw $5, %ymm2, %ymm2
-; ALL-NEXT: vpblendvb %ymm2, %ymm3, %ymm0, %ymm0
-; ALL-NEXT: vpsllw $2, %ymm0, %ymm3
-; ALL-NEXT: vmovdqa {{.*#+}} ymm5 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
-; ALL-NEXT: vpand %ymm5, %ymm3, %ymm3
-; ALL-NEXT: vpaddb %ymm2, %ymm2, %ymm6
-; ALL-NEXT: vpblendvb %ymm6, %ymm3, %ymm0, %ymm0
-; ALL-NEXT: vpaddb %ymm0, %ymm0, %ymm3
-; ALL-NEXT: vpaddb %ymm6, %ymm6, %ymm7
-; ALL-NEXT: vpblendvb %ymm7, %ymm3, %ymm0, %ymm0
-; ALL-NEXT: vpsllw $4, %ymm1, %ymm3
-; ALL-NEXT: vpand %ymm4, %ymm3, %ymm3
-; ALL-NEXT: vpblendvb %ymm2, %ymm3, %ymm1, %ymm1
-; ALL-NEXT: vpsllw $2, %ymm1, %ymm2
-; ALL-NEXT: vpand %ymm5, %ymm2, %ymm2
-; ALL-NEXT: vpblendvb %ymm6, %ymm2, %ymm1, %ymm1
-; ALL-NEXT: vpaddb %ymm1, %ymm1, %ymm2
-; ALL-NEXT: vpblendvb %ymm7, %ymm2, %ymm1, %ymm1
-; ALL-NEXT: retq
+; AVX512DQ-LABEL: splatvar_shift_v64i8:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vpbroadcastb %xmm2, %ymm2
+; AVX512DQ-NEXT: vpsllw $4, %ymm0, %ymm3
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512DQ-NEXT: vpand %ymm4, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpsllw $5, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm3, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsllw $2, %ymm0, %ymm3
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm5 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX512DQ-NEXT: vpand %ymm5, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpaddb %ymm2, %ymm2, %ymm6
+; AVX512DQ-NEXT: vpblendvb %ymm6, %ymm3, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpaddb %ymm0, %ymm0, %ymm3
+; AVX512DQ-NEXT: vpaddb %ymm6, %ymm6, %ymm7
+; AVX512DQ-NEXT: vpblendvb %ymm7, %ymm3, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsllw $4, %ymm1, %ymm3
+; AVX512DQ-NEXT: vpand %ymm4, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm3, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpsllw $2, %ymm1, %ymm2
+; AVX512DQ-NEXT: vpand %ymm5, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpblendvb %ymm6, %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpaddb %ymm1, %ymm1, %ymm2
+; AVX512DQ-NEXT: vpblendvb %ymm7, %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: retq
%splat = shufflevector <64 x i8> %b, <64 x i8> undef, <64 x i32> zeroinitializer
%shift = shl <64 x i8> %a, %splat
ret <64 x i8> %shift
}
define <32 x i16> @constant_shift_v32i16(<32 x i16> %a) nounwind {
-; ALL-LABEL: constant_shift_v32i16:
-; ALL: ## BB#0:
-; ALL-NEXT: vmovdqa {{.*#+}} ymm2 = [1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768]
-; ALL-NEXT: vpmullw %ymm2, %ymm0, %ymm0
-; ALL-NEXT: vpmullw %ymm2, %ymm1, %ymm1
-; ALL-NEXT: retq
+; AVX512DQ-LABEL: constant_shift_v32i16:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768]
+; AVX512DQ-NEXT: vpmullw %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpmullw %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: constant_shift_v32i16:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpsllvw {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT: retq
%shift = shl <32 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
ret <32 x i16> %shift
}
define <64 x i8> @constant_shift_v64i8(<64 x i8> %a) nounwind {
-; ALL-LABEL: constant_shift_v64i8:
-; ALL: ## BB#0:
-; ALL-NEXT: vpsllw $4, %ymm0, %ymm2
-; ALL-NEXT: vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; ALL-NEXT: vpand %ymm3, %ymm2, %ymm2
-; ALL-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0,0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
-; ALL-NEXT: vpsllw $5, %ymm4, %ymm4
-; ALL-NEXT: vpblendvb %ymm4, %ymm2, %ymm0, %ymm0
-; ALL-NEXT: vpsllw $2, %ymm0, %ymm2
-; ALL-NEXT: vmovdqa {{.*#+}} ymm5 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
-; ALL-NEXT: vpand %ymm5, %ymm2, %ymm2
-; ALL-NEXT: vpaddb %ymm4, %ymm4, %ymm6
-; ALL-NEXT: vpblendvb %ymm6, %ymm2, %ymm0, %ymm0
-; ALL-NEXT: vpaddb %ymm0, %ymm0, %ymm2
-; ALL-NEXT: vpaddb %ymm6, %ymm6, %ymm7
-; ALL-NEXT: vpblendvb %ymm7, %ymm2, %ymm0, %ymm0
-; ALL-NEXT: vpsllw $4, %ymm1, %ymm2
-; ALL-NEXT: vpand %ymm3, %ymm2, %ymm2
-; ALL-NEXT: vpblendvb %ymm4, %ymm2, %ymm1, %ymm1
-; ALL-NEXT: vpsllw $2, %ymm1, %ymm2
-; ALL-NEXT: vpand %ymm5, %ymm2, %ymm2
-; ALL-NEXT: vpblendvb %ymm6, %ymm2, %ymm1, %ymm1
-; ALL-NEXT: vpaddb %ymm1, %ymm1, %ymm2
-; ALL-NEXT: vpblendvb %ymm7, %ymm2, %ymm1, %ymm1
-; ALL-NEXT: retq
+; AVX512DQ-LABEL: constant_shift_v64i8:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vpsllw $4, %ymm0, %ymm2
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512DQ-NEXT: vpand %ymm3, %ymm2, %ymm2
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0,0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
+; AVX512DQ-NEXT: vpsllw $5, %ymm4, %ymm4
+; AVX512DQ-NEXT: vpblendvb %ymm4, %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsllw $2, %ymm0, %ymm2
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm5 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX512DQ-NEXT: vpand %ymm5, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpaddb %ymm4, %ymm4, %ymm6
+; AVX512DQ-NEXT: vpblendvb %ymm6, %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpaddb %ymm0, %ymm0, %ymm2
+; AVX512DQ-NEXT: vpaddb %ymm6, %ymm6, %ymm7
+; AVX512DQ-NEXT: vpblendvb %ymm7, %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsllw $4, %ymm1, %ymm2
+; AVX512DQ-NEXT: vpand %ymm3, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpblendvb %ymm4, %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpsllw $2, %ymm1, %ymm2
+; AVX512DQ-NEXT: vpand %ymm5, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpblendvb %ymm6, %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpaddb %ymm1, %ymm1, %ymm2
+; AVX512DQ-NEXT: vpblendvb %ymm7, %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: retq
%shift = shl <64 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
ret <64 x i8> %shift
}
}
define <32 x i16> @splatconstant_shift_v32i16(<32 x i16> %a) nounwind {
-; ALL-LABEL: splatconstant_shift_v32i16:
-; ALL: ## BB#0:
-; ALL-NEXT: vpsllw $3, %ymm0, %ymm0
-; ALL-NEXT: vpsllw $3, %ymm1, %ymm1
-; ALL-NEXT: retq
+; AVX512DQ-LABEL: splatconstant_shift_v32i16:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vpsllw $3, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsllw $3, %ymm1, %ymm1
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: splatconstant_shift_v32i16:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpsllw $3, %zmm0, %zmm0
+; AVX512BW-NEXT: retq
%shift = shl <32 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
ret <32 x i16> %shift
}
define <64 x i8> @splatconstant_shift_v64i8(<64 x i8> %a) nounwind {
-; ALL-LABEL: splatconstant_shift_v64i8:
-; ALL: ## BB#0:
-; ALL-NEXT: vpsllw $3, %ymm0, %ymm0
-; ALL-NEXT: vmovdqa {{.*#+}} ymm2 = [248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248]
-; ALL-NEXT: vpand %ymm2, %ymm0, %ymm0
-; ALL-NEXT: vpsllw $3, %ymm1, %ymm1
-; ALL-NEXT: vpand %ymm2, %ymm1, %ymm1
-; ALL-NEXT: retq
+; AVX512DQ-LABEL: splatconstant_shift_v64i8:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vpsllw $3, %ymm0, %ymm0
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248]
+; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsllw $3, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpand %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: splatconstant_shift_v64i8:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpsllw $3, %zmm0, %zmm0
+; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT: retq
%shift = shl <64 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
ret <64 x i8> %shift
}
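+; NOTE: For the splat-constant v64i8 case, AVX512BW replaces the two
+; ymm-sized vpsllw/vpand pairs with a single zmm vpsllw followed by one
+; vpandq against the byte mask.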