setOperationAction(ISD::XOR, MVT::v16i32, Legal);
if (Subtarget->hasCDI()) {
- setOperationAction(ISD::CTLZ, MVT::v8i64, Legal);
+ setOperationAction(ISD::CTLZ, MVT::v8i64, Legal);
setOperationAction(ISD::CTLZ, MVT::v16i32, Legal);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v8i64, Legal);
+ setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v8i64, Legal);
setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v16i32, Legal);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i64, Custom);
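+      // There is no sub-dword vplzcnt, so i8/i16 vector CTLZ is lowered
+      // through the dword vplzcntd sequence in LowerVectorCTLZ_AVX512 below,
+      // hence Custom.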
+ setOperationAction(ISD::CTLZ, MVT::v8i16, Custom);
+ setOperationAction(ISD::CTLZ, MVT::v16i8, Custom);
+ setOperationAction(ISD::CTLZ, MVT::v16i16, Custom);
+ setOperationAction(ISD::CTLZ, MVT::v32i8, Custom);
+ setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v8i16, Custom);
+ setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v16i8, Custom);
+ setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v16i16, Custom);
+ setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v32i8, Custom);
+
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i64, Custom);
setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v16i32, Custom);
- }
- if (Subtarget->hasVLX() && Subtarget->hasCDI()) {
- setOperationAction(ISD::CTLZ, MVT::v4i64, Legal);
- setOperationAction(ISD::CTLZ, MVT::v8i32, Legal);
- setOperationAction(ISD::CTLZ, MVT::v2i64, Legal);
- setOperationAction(ISD::CTLZ, MVT::v4i32, Legal);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v4i64, Legal);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v8i32, Legal);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v2i64, Legal);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v4i32, Legal);
-
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i64, Custom);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i32, Custom);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i64, Custom);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i32, Custom);
- }
+
+ if (Subtarget->hasVLX()) {
+ setOperationAction(ISD::CTLZ, MVT::v4i64, Legal);
+ setOperationAction(ISD::CTLZ, MVT::v8i32, Legal);
+ setOperationAction(ISD::CTLZ, MVT::v2i64, Legal);
+ setOperationAction(ISD::CTLZ, MVT::v4i32, Legal);
+ setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v4i64, Legal);
+ setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v8i32, Legal);
+ setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v2i64, Legal);
+ setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v4i32, Legal);
+
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i64, Custom);
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i32, Custom);
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i64, Custom);
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i32, Custom);
+ } else {
+ setOperationAction(ISD::CTLZ, MVT::v4i64, Custom);
+ setOperationAction(ISD::CTLZ, MVT::v8i32, Custom);
+ setOperationAction(ISD::CTLZ, MVT::v2i64, Custom);
+ setOperationAction(ISD::CTLZ, MVT::v4i32, Custom);
+ setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v4i64, Custom);
+ setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v8i32, Custom);
+ setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v2i64, Custom);
+ setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v4i32, Custom);
+ }
+ } // Subtarget->hasCDI()
+
if (Subtarget->hasDQI()) {
setOperationAction(ISD::MUL, MVT::v2i64, Legal);
setOperationAction(ISD::MUL, MVT::v4i64, Legal);
setOperationAction(ISD::MULHU, MVT::v32i16, Legal);
setOperationAction(ISD::CONCAT_VECTORS, MVT::v32i1, Legal);
setOperationAction(ISD::CONCAT_VECTORS, MVT::v64i1, Legal);
+ setOperationAction(ISD::CONCAT_VECTORS, MVT::v32i16, Custom);
+ setOperationAction(ISD::CONCAT_VECTORS, MVT::v64i8, Custom);
setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v32i1, Custom);
setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v64i1, Custom);
setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v32i16, Custom);
if (Subtarget->hasVLX())
setTruncStoreAction(MVT::v8i16, MVT::v8i8, Legal);
+ if (Subtarget->hasCDI()) {
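+      // v32i16/v64i8 exceed 16 elements, so LowerVectorCTLZ_AVX512 splits
+      // them and lowers each half through the zext-to-i32 vplzcntd path.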
+ setOperationAction(ISD::CTLZ, MVT::v32i16, Custom);
+ setOperationAction(ISD::CTLZ, MVT::v64i8, Custom);
+ setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v32i16, Custom);
+ setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v64i8, Custom);
+ }
+
for (int i = MVT::v32i8; i != MVT::v8i64; ++i) {
const MVT VT = (MVT::SimpleValueType)i;
ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal);
}
-static SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG) {
+/// \brief Lower a vector CTLZ using the natively supported vector CTLZ
+/// instruction.
+//
+// 1. i32/i64 128/256-bit vectors (native support requires VLX) are extended
+//    to a 512-bit vector.
+// 2. i8/i16 vectors are implemented using the dword LZCNT vector instruction
+//    ( sub(trunc(lzcnt(zext32(x)))) ). In case zext32(x) is illegal,
+//    split the vector, perform the operation on its Lo and Hi parts, and
+//    concatenate the results.
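+//
+// For example, ctlz of a v8i16 input is lowered roughly to the sequence
+// below (a sketch of the resulting DAG, not code emitted verbatim):
+//   t0 = zext  v8i16 x to v8i32
+//   t1 = ctlz  v8i32 t0             // selected as vplzcntd
+//   t2 = trunc v8i32 t1 to v8i16
+//   r  = sub   v8i16 t2, splat(16)  // remove the 32 - 16 zext bias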
+static SDValue LowerVectorCTLZ_AVX512(SDValue Op, SelectionDAG &DAG) {
+ SDLoc dl(Op);
+ MVT VT = Op.getSimpleValueType();
+ MVT EltVT = VT.getVectorElementType();
+ unsigned NumElems = VT.getVectorNumElements();
+
+ if (EltVT == MVT::i64 || EltVT == MVT::i32) {
+    // Extend to a 512-bit vector.
+ assert((VT.is256BitVector() || VT.is128BitVector()) &&
+ "Unsupported value type for operation");
+
+ MVT NewVT = MVT::getVectorVT(EltVT, 512 / VT.getScalarSizeInBits());
+ SDValue Vec512 = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, NewVT,
+ DAG.getUNDEF(NewVT),
+ Op.getOperand(0),
+ DAG.getIntPtrConstant(0, dl));
+ SDValue CtlzNode = DAG.getNode(ISD::CTLZ, dl, NewVT, Vec512);
+
+ return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, CtlzNode,
+ DAG.getIntPtrConstant(0, dl));
+ }
+
+ assert((EltVT == MVT::i8 || EltVT == MVT::i16) &&
+ "Unsupported element type");
+
+  if (NumElems > 16) {
+    // Split the vector; its Lo and Hi halves are handled by the next round
+    // of lowering.
+ SDValue Lo, Hi;
+ std::tie(Lo, Hi) = DAG.SplitVector(Op.getOperand(0), dl);
+ MVT OutVT = MVT::getVectorVT(EltVT, NumElems/2);
+
+ Lo = DAG.getNode(Op.getOpcode(), dl, OutVT, Lo);
+ Hi = DAG.getNode(Op.getOpcode(), dl, OutVT, Hi);
+
+ return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
+ }
+
+ MVT NewVT = MVT::getVectorVT(MVT::i32, NumElems);
+
+ assert((NewVT.is256BitVector() || NewVT.is512BitVector()) &&
+ "Unsupported value type for operation");
+
+  // Use the natively supported vplzcntd vector instruction.
+ Op = DAG.getNode(ISD::ZERO_EXTEND, dl, NewVT, Op.getOperand(0));
+ SDValue CtlzNode = DAG.getNode(ISD::CTLZ, dl, NewVT, Op);
+ SDValue TruncNode = DAG.getNode(ISD::TRUNCATE, dl, VT, CtlzNode);
+ SDValue Delta = DAG.getConstant(32 - EltVT.getSizeInBits(), dl, VT);
+
+ return DAG.getNode(ISD::SUB, dl, VT, TruncNode, Delta);
+}
+
+static SDValue LowerCTLZ(SDValue Op, const X86Subtarget *Subtarget,
+ SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
EVT OpVT = VT;
unsigned NumBits = VT.getSizeInBits();
SDLoc dl(Op);
+ if (VT.isVector() && Subtarget->hasAVX512())
+ return LowerVectorCTLZ_AVX512(Op, DAG);
+
Op = Op.getOperand(0);
if (VT == MVT::i8) {
// Zero extend to i32 since there is not an i8 bsr.
return Op;
}
-static SDValue LowerCTLZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) {
+static SDValue LowerCTLZ_ZERO_UNDEF(SDValue Op, const X86Subtarget *Subtarget,
+ SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
EVT OpVT = VT;
unsigned NumBits = VT.getSizeInBits();
SDLoc dl(Op);
+ if (VT.isVector() && Subtarget->hasAVX512())
+ return LowerVectorCTLZ_AVX512(Op, DAG);
+
Op = Op.getOperand(0);
if (VT == MVT::i8) {
// Zero extend to i32 since there is not an i8 bsr.
case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
- case ISD::CTLZ: return LowerCTLZ(Op, DAG);
- case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ_ZERO_UNDEF(Op, DAG);
+ case ISD::CTLZ: return LowerCTLZ(Op, Subtarget, DAG);
+ case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ_ZERO_UNDEF(Op, Subtarget, DAG);
case ISD::CTTZ:
case ISD::CTTZ_ZERO_UNDEF: return LowerCTTZ(Op, DAG);
case ISD::MUL: return LowerMUL(Op, Subtarget, DAG);
(INSERT_SUBREG (v16i32 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
def : Pat<(insert_subvector undef, (v8f32 VR256X:$src), (iPTR 0)),
(INSERT_SUBREG (v16f32 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
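+// The analogous patterns for inserting a 256-bit integer subvector at
+// index 0 of an undef 512-bit vector as a plain subregister copy: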
+def : Pat<(insert_subvector undef, (v16i16 VR256X:$src), (iPTR 0)),
+ (INSERT_SUBREG (v32i16 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
+def : Pat<(insert_subvector undef, (v32i8 VR256X:$src), (iPTR 0)),
+ (INSERT_SUBREG (v64i8 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
// vextractps - extract 32 bits from XMM
def VEXTRACTPSzrr : AVX512AIi8<0x17, MRMDestReg, (outs GR32:$dst),
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512cd -mattr=+avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512VLCD
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512cd -mattr=+avx512vl | FileCheck %s --check-prefix=AVX512VLCD --check-prefix=ALL --check-prefix=AVX512
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512cd | FileCheck %s --check-prefix=AVX512CD --check-prefix=ALL --check-prefix=AVX512
define <2 x i64> @testv2i64(<2 x i64> %in) nounwind {
; SSE2-LABEL: testv2i64:
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT: retq
;
-; AVX512-LABEL: testv2i64:
-; AVX512: ## BB#0:
-; AVX512-NEXT: vplzcntq %xmm0, %xmm0
-; AVX512-NEXT: retq
+; AVX512VLCD-LABEL: testv2i64:
+; AVX512VLCD: ## BB#0:
+; AVX512VLCD-NEXT: vplzcntq %xmm0, %xmm0
+; AVX512VLCD-NEXT: retq
+;
+; AVX512CD-LABEL: testv2i64:
+; AVX512CD: ## BB#0:
+; AVX512CD-NEXT: vplzcntq %zmm0, %zmm0
+; AVX512CD-NEXT: retq
%out = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %in, i1 0)
ret <2 x i64> %out
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT: retq
;
-; AVX512-LABEL: testv2i64u:
-; AVX512: ## BB#0:
-; AVX512-NEXT: vplzcntq %xmm0, %xmm0
-; AVX512-NEXT: retq
+; AVX512VLCD-LABEL: testv2i64u:
+; AVX512VLCD: ## BB#0:
+; AVX512VLCD-NEXT: vplzcntq %xmm0, %xmm0
+; AVX512VLCD-NEXT: retq
+;
+; AVX512CD-LABEL: testv2i64u:
+; AVX512CD: ## BB#0:
+; AVX512CD-NEXT: vplzcntq %zmm0, %zmm0
+; AVX512CD-NEXT: retq
%out = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %in, i1 -1)
ret <2 x i64> %out
; AVX-NEXT: vpinsrd $3, %eax, %xmm1, %xmm0
; AVX-NEXT: retq
;
-; AVX512-LABEL: testv4i32:
-; AVX512: ## BB#0:
-; AVX512-NEXT: vplzcntd %xmm0, %xmm0
-; AVX512-NEXT: retq
+; AVX512VLCD-LABEL: testv4i32:
+; AVX512VLCD: ## BB#0:
+; AVX512VLCD-NEXT: vplzcntd %xmm0, %xmm0
+; AVX512VLCD-NEXT: retq
+;
+; AVX512CD-LABEL: testv4i32:
+; AVX512CD: ## BB#0:
+; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
+; AVX512CD-NEXT: retq
%out = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %in, i1 0)
ret <4 x i32> %out
; AVX-NEXT: vpinsrd $3, %eax, %xmm1, %xmm0
; AVX-NEXT: retq
;
-; AVX512-LABEL: testv4i32u:
-; AVX512: ## BB#0:
-; AVX512-NEXT: vplzcntd %xmm0, %xmm0
-; AVX512-NEXT: retq
+; AVX512VLCD-LABEL: testv4i32u:
+; AVX512VLCD: ## BB#0:
+; AVX512VLCD-NEXT: vplzcntd %xmm0, %xmm0
+; AVX512VLCD-NEXT: retq
+;
+; AVX512CD-LABEL: testv4i32u:
+; AVX512CD: ## BB#0:
+; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
+; AVX512CD-NEXT: retq
%out = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %in, i1 -1)
ret <4 x i32> %out
; AVX-NEXT: vpinsrw $7, %ecx, %xmm1, %xmm0
; AVX-NEXT: retq
;
-; AVX512-LABEL: testv8i16:
-; AVX512: ## BB#0:
-; AVX512-NEXT: vpextrw $1, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vmovd %xmm0, %ecx
-; AVX512-NEXT: lzcntw %cx, %cx
-; AVX512-NEXT: vmovd %ecx, %xmm1
-; AVX512-NEXT: vpinsrw $1, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrw $2, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $2, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrw $3, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $3, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrw $4, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $4, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrw $5, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $5, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrw $6, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $6, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrw $7, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $7, %eax, %xmm1, %xmm0
-; AVX512-NEXT: retq
+; AVX512VLCD-LABEL: testv8i16:
+; AVX512VLCD: ## BB#0:
+; AVX512VLCD-NEXT: vpmovzxwd %xmm0, %ymm0
+; AVX512VLCD-NEXT: vplzcntd %ymm0, %ymm0
+; AVX512VLCD-NEXT: vpmovdw %ymm0, %xmm0
+; AVX512VLCD-NEXT: vpsubw {{.*}}(%rip), %xmm0, %xmm0
+; AVX512VLCD-NEXT: retq
+;
+; AVX512CD-LABEL: testv8i16:
+; AVX512CD: ## BB#0:
+; AVX512CD-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
+; AVX512CD-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512CD-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX512CD-NEXT: vpsubw {{.*}}(%rip), %xmm0, %xmm0
+; AVX512CD-NEXT: retq
%out = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %in, i1 0)
ret <8 x i16> %out
}
; AVX-NEXT: vpinsrw $7, %eax, %xmm1, %xmm0
; AVX-NEXT: retq
;
-; AVX512-LABEL: testv8i16u:
-; AVX512: ## BB#0:
-; AVX512-NEXT: vpextrw $1, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vmovd %xmm0, %ecx
-; AVX512-NEXT: lzcntw %cx, %cx
-; AVX512-NEXT: vmovd %ecx, %xmm1
-; AVX512-NEXT: vpinsrw $1, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrw $2, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $2, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrw $3, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $3, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrw $4, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $4, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrw $5, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $5, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrw $6, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $6, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrw $7, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $7, %eax, %xmm1, %xmm0
-; AVX512-NEXT: retq
+; AVX512VLCD-LABEL: testv8i16u:
+; AVX512VLCD: ## BB#0:
+; AVX512VLCD-NEXT: vpmovzxwd %xmm0, %ymm0
+; AVX512VLCD-NEXT: vplzcntd %ymm0, %ymm0
+; AVX512VLCD-NEXT: vpmovdw %ymm0, %xmm0
+; AVX512VLCD-NEXT: vpsubw {{.*}}(%rip), %xmm0, %xmm0
+; AVX512VLCD-NEXT: retq
+;
+; AVX512CD-LABEL: testv8i16u:
+; AVX512CD: ## BB#0:
+; AVX512CD-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
+; AVX512CD-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512CD-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX512CD-NEXT: vpsubw {{.*}}(%rip), %xmm0, %xmm0
+; AVX512CD-NEXT: retq
%out = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %in, i1 -1)
ret <8 x i16> %out
}
; AVX-NEXT: vpinsrb $15, %ecx, %xmm1, %xmm0
; AVX-NEXT: retq
;
-; AVX512-LABEL: testv16i8:
-; AVX512: ## BB#0:
-; AVX512-NEXT: vpextrb $1, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpextrb $0, %xmm0, %ecx
-; AVX512-NEXT: lzcntl %ecx, %ecx
-; AVX512-NEXT: addl $-24, %ecx
-; AVX512-NEXT: vmovd %ecx, %xmm1
-; AVX512-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $2, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $3, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $4, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $5, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $6, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $7, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $8, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $9, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $10, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $11, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $12, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $13, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $14, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $15, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $15, %eax, %xmm1, %xmm0
-; AVX512-NEXT: retq
+; AVX512VLCD-LABEL: testv16i8:
+; AVX512VLCD: ## BB#0:
+; AVX512VLCD-NEXT: vpmovzxbd %xmm0, %zmm0
+; AVX512VLCD-NEXT: vplzcntd %zmm0, %zmm0
+; AVX512VLCD-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512VLCD-NEXT: vpsubb {{.*}}(%rip), %xmm0, %xmm0
+; AVX512VLCD-NEXT: retq
+;
+; AVX512CD-LABEL: testv16i8:
+; AVX512CD: ## BB#0:
+; AVX512CD-NEXT: vpmovzxbd %xmm0, %zmm0
+; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
+; AVX512CD-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512CD-NEXT: vpsubb {{.*}}(%rip), %xmm0, %xmm0
+; AVX512CD-NEXT: retq
%out = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %in, i1 0)
ret <16 x i8> %out
}
; AVX-NEXT: vpinsrb $15, %eax, %xmm1, %xmm0
; AVX-NEXT: retq
;
-; AVX512-LABEL: testv16i8u:
-; AVX512: ## BB#0:
-; AVX512-NEXT: vpextrb $1, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpextrb $0, %xmm0, %ecx
-; AVX512-NEXT: lzcntl %ecx, %ecx
-; AVX512-NEXT: addl $-24, %ecx
-; AVX512-NEXT: vmovd %ecx, %xmm1
-; AVX512-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $2, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $3, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $4, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $5, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $6, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $7, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $8, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $9, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $10, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $11, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $12, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $13, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $14, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrb $15, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $15, %eax, %xmm1, %xmm0
-; AVX512-NEXT: retq
+; AVX512VLCD-LABEL: testv16i8u:
+; AVX512VLCD: ## BB#0:
+; AVX512VLCD-NEXT: vpmovzxbd %xmm0, %zmm0
+; AVX512VLCD-NEXT: vplzcntd %zmm0, %zmm0
+; AVX512VLCD-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512VLCD-NEXT: vpsubb {{.*}}(%rip), %xmm0, %xmm0
+; AVX512VLCD-NEXT: retq
+;
+; AVX512CD-LABEL: testv16i8u:
+; AVX512CD: ## BB#0:
+; AVX512CD-NEXT: vpmovzxbd %xmm0, %zmm0
+; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
+; AVX512CD-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512CD-NEXT: vpsubb {{.*}}(%rip), %xmm0, %xmm0
+; AVX512CD-NEXT: retq
%out = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %in, i1 -1)
ret <16 x i8> %out
}
; AVX-NEXT: vmovq %rax, %xmm0
; AVX-NEXT: retq
;
-; AVX512-LABEL: foldv2i64:
-; AVX512: ## BB#0:
-; AVX512-NEXT: movl $55, %eax
-; AVX512-NEXT: vmovq %rax, %xmm0
-; AVX512-NEXT: retq
+; AVX512VLCD-LABEL: foldv2i64:
+; AVX512VLCD: ## BB#0:
+; AVX512VLCD-NEXT: movl $55, %eax
+; AVX512VLCD-NEXT: vmovq %rax, %xmm0
+; AVX512VLCD-NEXT: retq
+;
+; AVX512CD-LABEL: foldv2i64:
+; AVX512CD: ## BB#0:
+; AVX512CD-NEXT: movl $55, %eax
+; AVX512CD-NEXT: vmovq %rax, %xmm0
+; AVX512CD-NEXT: retq
%out = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> <i64 256, i64 -1>, i1 0)
ret <2 x i64> %out
}
; AVX-NEXT: vmovq %rax, %xmm0
; AVX-NEXT: retq
;
-; AVX512-LABEL: foldv2i64u:
-; AVX512: ## BB#0:
-; AVX512-NEXT: movl $55, %eax
-; AVX512-NEXT: vmovq %rax, %xmm0
-; AVX512-NEXT: retq
+; AVX512VLCD-LABEL: foldv2i64u:
+; AVX512VLCD: ## BB#0:
+; AVX512VLCD-NEXT: movl $55, %eax
+; AVX512VLCD-NEXT: vmovq %rax, %xmm0
+; AVX512VLCD-NEXT: retq
+;
+; AVX512CD-LABEL: foldv2i64u:
+; AVX512CD: ## BB#0:
+; AVX512CD-NEXT: movl $55, %eax
+; AVX512CD-NEXT: vmovq %rax, %xmm0
+; AVX512CD-NEXT: retq
%out = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> <i64 256, i64 -1>, i1 -1)
ret <2 x i64> %out
}
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [23,0,32,24]
; AVX-NEXT: retq
;
-; AVX512-LABEL: foldv4i32:
-; AVX512: ## BB#0:
-; AVX512-NEXT: vmovdqa32 {{.*}}(%rip), %xmm0
-; AVX512-NEXT: retq
+; AVX512VLCD-LABEL: foldv4i32:
+; AVX512VLCD: ## BB#0:
+; AVX512VLCD-NEXT: vmovdqa32 {{.*#+}} xmm0 = [23,0,32,24]
+; AVX512VLCD-NEXT: retq
+;
+; AVX512CD-LABEL: foldv4i32:
+; AVX512CD: ## BB#0:
+; AVX512CD-NEXT: vmovaps {{.*#+}} xmm0 = [23,0,32,24]
+; AVX512CD-NEXT: retq
%out = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> <i32 256, i32 -1, i32 0, i32 255>, i1 0)
ret <4 x i32> %out
}
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [23,0,32,24]
; AVX-NEXT: retq
;
-; AVX512-LABEL: foldv4i32u:
-; AVX512: ## BB#0:
-; AVX512-NEXT: vmovdqa32 {{.*}}(%rip), %xmm0
-; AVX512-NEXT: retq
+; AVX512VLCD-LABEL: foldv4i32u:
+; AVX512VLCD: ## BB#0:
+; AVX512VLCD-NEXT: vmovdqa32 {{.*#+}} xmm0 = [23,0,32,24]
+; AVX512VLCD-NEXT: retq
+;
+; AVX512CD-LABEL: foldv4i32u:
+; AVX512CD: ## BB#0:
+; AVX512CD-NEXT: vmovaps {{.*#+}} xmm0 = [23,0,32,24]
+; AVX512CD-NEXT: retq
%out = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> <i32 256, i32 -1, i32 0, i32 255>, i1 -1)
ret <4 x i32> %out
}
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [7,0,16,8,16,13,11,9]
; AVX-NEXT: retq
;
-; AVX512-LABEL: foldv8i16:
-; AVX512: ## BB#0:
-; AVX512-NEXT: vmovdqa64 {{.*}}(%rip), %xmm0
-; AVX512-NEXT: retq
+; AVX512VLCD-LABEL: foldv8i16:
+; AVX512VLCD: ## BB#0:
+; AVX512VLCD-NEXT: vmovdqa64 {{.*#+}} xmm0 = [7,0,16,8,16,13,11,9]
+; AVX512VLCD-NEXT: retq
+;
+; AVX512CD-LABEL: foldv8i16:
+; AVX512CD: ## BB#0:
+; AVX512CD-NEXT: vmovaps {{.*#+}} xmm0 = [7,0,16,8,16,13,11,9]
+; AVX512CD-NEXT: retq
%out = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> <i16 256, i16 -1, i16 0, i16 255, i16 -65536, i16 7, i16 24, i16 88>, i1 0)
ret <8 x i16> %out
}
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [7,0,16,8,16,13,11,9]
; AVX-NEXT: retq
;
-; AVX512-LABEL: foldv8i16u:
-; AVX512: ## BB#0:
-; AVX512-NEXT: vmovdqa64 {{.*}}(%rip), %xmm0
-; AVX512-NEXT: retq
+; AVX512VLCD-LABEL: foldv8i16u:
+; AVX512VLCD: ## BB#0:
+; AVX512VLCD-NEXT: vmovdqa64 {{.*#+}} xmm0 = [7,0,16,8,16,13,11,9]
+; AVX512VLCD-NEXT: retq
+;
+; AVX512CD-LABEL: foldv8i16u:
+; AVX512CD: ## BB#0:
+; AVX512CD-NEXT: vmovaps {{.*#+}} xmm0 = [7,0,16,8,16,13,11,9]
+; AVX512CD-NEXT: retq
%out = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> <i16 256, i16 -1, i16 0, i16 255, i16 -65536, i16 7, i16 24, i16 88>, i1 -1)
ret <8 x i16> %out
}
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2]
; AVX-NEXT: retq
;
-; AVX512-LABEL: foldv16i8:
-; AVX512: ## BB#0:
-; AVX512-NEXT: vmovdqa64 {{.*}}(%rip), %xmm0
-; AVX512-NEXT: retq
+; AVX512VLCD-LABEL: foldv16i8:
+; AVX512VLCD: ## BB#0:
+; AVX512VLCD-NEXT: vmovdqa64 {{.*#+}} xmm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2]
+; AVX512VLCD-NEXT: retq
+;
+; AVX512CD-LABEL: foldv16i8:
+; AVX512CD: ## BB#0:
+; AVX512CD-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2]
+; AVX512CD-NEXT: retq
%out = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> <i8 256, i8 -1, i8 0, i8 255, i8 -65536, i8 7, i8 24, i8 88, i8 -2, i8 254, i8 1, i8 2, i8 4, i8 8, i8 16, i8 32>, i1 0)
ret <16 x i8> %out
}
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2]
; AVX-NEXT: retq
;
-; AVX512-LABEL: foldv16i8u:
-; AVX512: ## BB#0:
-; AVX512-NEXT: vmovdqa64 {{.*}}(%rip), %xmm0
-; AVX512-NEXT: retq
+; AVX512VLCD-LABEL: foldv16i8u:
+; AVX512VLCD: ## BB#0:
+; AVX512VLCD-NEXT: vmovdqa64 {{.*#+}} xmm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2]
+; AVX512VLCD-NEXT: retq
+;
+; AVX512CD-LABEL: foldv16i8u:
+; AVX512CD: ## BB#0:
+; AVX512CD-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2]
+; AVX512CD-NEXT: retq
%out = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> <i8 256, i8 -1, i8 0, i8 255, i8 -65536, i8 7, i8 24, i8 88, i8 -2, i8 254, i8 1, i8 2, i8 4, i8 8, i8 16, i8 32>, i1 -1)
ret <16 x i8> %out
}
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512cd -mattr=+avx512vl| FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512VLCD
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512cd -mattr=+avx512vl| FileCheck %s --check-prefix=AVX512VLCD --check-prefix=ALL --check-prefix=AVX512
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512cd | FileCheck %s --check-prefix=AVX512CD --check-prefix=ALL --check-prefix=AVX512
define <4 x i64> @testv4i64(<4 x i64> %in) nounwind {
; AVX1-LABEL: testv4i64:
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
-; AVX512-LABEL: testv4i64:
-; AVX512: ## BB#0:
-; AVX512-NEXT: vplzcntq %ymm0, %ymm0
-; AVX512-NEXT: retq
+; AVX512VLCD-LABEL: testv4i64:
+; AVX512VLCD: ## BB#0:
+; AVX512VLCD-NEXT: vplzcntq %ymm0, %ymm0
+; AVX512VLCD-NEXT: retq
+;
+; AVX512CD-LABEL: testv4i64:
+; AVX512CD: ## BB#0:
+; AVX512CD-NEXT: vplzcntq %zmm0, %zmm0
+; AVX512CD-NEXT: retq
%out = call <4 x i64> @llvm.ctlz.v4i64(<4 x i64> %in, i1 0)
ret <4 x i64> %out
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
-; AVX512-LABEL: testv4i64u:
-; AVX512: ## BB#0:
-; AVX512-NEXT: vplzcntq %ymm0, %ymm0
-; AVX512-NEXT: retq
+; AVX512VLCD-LABEL: testv4i64u:
+; AVX512VLCD: ## BB#0:
+; AVX512VLCD-NEXT: vplzcntq %ymm0, %ymm0
+; AVX512VLCD-NEXT: retq
+;
+; AVX512CD-LABEL: testv4i64u:
+; AVX512CD: ## BB#0:
+; AVX512CD-NEXT: vplzcntq %zmm0, %zmm0
+; AVX512CD-NEXT: retq
%out = call <4 x i64> @llvm.ctlz.v4i64(<4 x i64> %in, i1 -1)
ret <4 x i64> %out
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
-; AVX512-LABEL: testv8i32:
-; AVX512: ## BB#0:
-; AVX512-NEXT: vplzcntd %ymm0, %ymm0
-; AVX512-NEXT: retq
+; AVX512VLCD-LABEL: testv8i32:
+; AVX512VLCD: ## BB#0:
+; AVX512VLCD-NEXT: vplzcntd %ymm0, %ymm0
+; AVX512VLCD-NEXT: retq
+;
+; AVX512CD-LABEL: testv8i32:
+; AVX512CD: ## BB#0:
+; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
+; AVX512CD-NEXT: retq
%out = call <8 x i32> @llvm.ctlz.v8i32(<8 x i32> %in, i1 0)
ret <8 x i32> %out
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
-; AVX512-LABEL: testv8i32u:
-; AVX512: ## BB#0:
-; AVX512-NEXT: vplzcntd %ymm0, %ymm0
-; AVX512-NEXT: retq
+; AVX512VLCD-LABEL: testv8i32u:
+; AVX512VLCD: ## BB#0:
+; AVX512VLCD-NEXT: vplzcntd %ymm0, %ymm0
+; AVX512VLCD-NEXT: retq
+;
+; AVX512CD-LABEL: testv8i32u:
+; AVX512CD: ## BB#0:
+; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
+; AVX512CD-NEXT: retq
%out = call <8 x i32> @llvm.ctlz.v8i32(<8 x i32> %in, i1 -1)
ret <8 x i32> %out
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
-; AVX512-LABEL: testv16i16:
-; AVX512: ## BB#0:
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpextrw $1, %xmm1, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vmovd %xmm1, %ecx
-; AVX512-NEXT: lzcntw %cx, %cx
-; AVX512-NEXT: vmovd %ecx, %xmm2
-; AVX512-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrw $2, %xmm1, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrw $3, %xmm1, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $3, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrw $4, %xmm1, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrw $5, %xmm1, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $5, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrw $6, %xmm1, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $6, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrw $7, %xmm1, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $7, %eax, %xmm2, %xmm1
-; AVX512-NEXT: vpextrw $1, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vmovd %xmm0, %ecx
-; AVX512-NEXT: lzcntw %cx, %cx
-; AVX512-NEXT: vmovd %ecx, %xmm2
-; AVX512-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrw $2, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrw $3, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $3, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrw $4, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrw $5, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $5, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrw $6, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $6, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrw $7, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $7, %eax, %xmm2, %xmm0
-; AVX512-NEXT: vinserti32x4 $1, %xmm1, %ymm0, %ymm0
-; AVX512-NEXT: retq
+; AVX512VLCD-LABEL: testv16i16:
+; AVX512VLCD: ## BB#0:
+; AVX512VLCD-NEXT: vpmovzxwd %ymm0, %zmm0
+; AVX512VLCD-NEXT: vplzcntd %zmm0, %zmm0
+; AVX512VLCD-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512VLCD-NEXT: vpsubw {{.*}}(%rip), %ymm0, %ymm0
+; AVX512VLCD-NEXT: retq
+;
+; AVX512CD-LABEL: testv16i16:
+; AVX512CD: ## BB#0:
+; AVX512CD-NEXT: vpmovzxwd %ymm0, %zmm0
+; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
+; AVX512CD-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512CD-NEXT: vpsubw {{.*}}(%rip), %ymm0, %ymm0
+; AVX512CD-NEXT: retq
%out = call <16 x i16> @llvm.ctlz.v16i16(<16 x i16> %in, i1 0)
ret <16 x i16> %out
}
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
-; AVX512-LABEL: testv16i16u:
-; AVX512: ## BB#0:
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpextrw $1, %xmm1, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vmovd %xmm1, %ecx
-; AVX512-NEXT: lzcntw %cx, %cx
-; AVX512-NEXT: vmovd %ecx, %xmm2
-; AVX512-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrw $2, %xmm1, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrw $3, %xmm1, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $3, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrw $4, %xmm1, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrw $5, %xmm1, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $5, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrw $6, %xmm1, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $6, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrw $7, %xmm1, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $7, %eax, %xmm2, %xmm1
-; AVX512-NEXT: vpextrw $1, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vmovd %xmm0, %ecx
-; AVX512-NEXT: lzcntw %cx, %cx
-; AVX512-NEXT: vmovd %ecx, %xmm2
-; AVX512-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrw $2, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrw $3, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $3, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrw $4, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrw $5, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $5, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrw $6, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $6, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrw $7, %xmm0, %eax
-; AVX512-NEXT: lzcntw %ax, %ax
-; AVX512-NEXT: vpinsrw $7, %eax, %xmm2, %xmm0
-; AVX512-NEXT: vinserti32x4 $1, %xmm1, %ymm0, %ymm0
-; AVX512-NEXT: retq
+; AVX512VLCD-LABEL: testv16i16u:
+; AVX512VLCD: ## BB#0:
+; AVX512VLCD-NEXT: vpmovzxwd %ymm0, %zmm0
+; AVX512VLCD-NEXT: vplzcntd %zmm0, %zmm0
+; AVX512VLCD-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512VLCD-NEXT: vpsubw {{.*}}(%rip), %ymm0, %ymm0
+; AVX512VLCD-NEXT: retq
+;
+; AVX512CD-LABEL: testv16i16u:
+; AVX512CD: ## BB#0:
+; AVX512CD-NEXT: vpmovzxwd %ymm0, %zmm0
+; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
+; AVX512CD-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512CD-NEXT: vpsubw {{.*}}(%rip), %ymm0, %ymm0
+; AVX512CD-NEXT: retq
%out = call <16 x i16> @llvm.ctlz.v16i16(<16 x i16> %in, i1 -1)
ret <16 x i16> %out
}
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
-; AVX512-LABEL: testv32i8:
-; AVX512: ## BB#0:
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpextrb $1, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpextrb $0, %xmm1, %ecx
-; AVX512-NEXT: lzcntl %ecx, %ecx
-; AVX512-NEXT: addl $-24, %ecx
-; AVX512-NEXT: vmovd %ecx, %xmm2
-; AVX512-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $2, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $3, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $4, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $5, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $6, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $7, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $8, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $9, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $10, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $11, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $12, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $13, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $14, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $15, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $15, %eax, %xmm2, %xmm1
-; AVX512-NEXT: vpextrb $1, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpextrb $0, %xmm0, %ecx
-; AVX512-NEXT: lzcntl %ecx, %ecx
-; AVX512-NEXT: addl $-24, %ecx
-; AVX512-NEXT: vmovd %ecx, %xmm2
-; AVX512-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $2, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $3, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $4, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $5, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $6, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $7, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $8, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $9, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $10, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $11, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $12, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $13, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $14, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $15, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $15, %eax, %xmm2, %xmm0
-; AVX512-NEXT: vinserti32x4 $1, %xmm1, %ymm0, %ymm0
-; AVX512-NEXT: retq
+; AVX512VLCD-LABEL: testv32i8:
+; AVX512VLCD: ## BB#0:
+; AVX512VLCD-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX512VLCD-NEXT: vpmovzxbd %xmm1, %zmm1
+; AVX512VLCD-NEXT: vplzcntd %zmm1, %zmm1
+; AVX512VLCD-NEXT: vpmovdb %zmm1, %xmm1
+; AVX512VLCD-NEXT: vmovdqa64 {{.*#+}} xmm2 = [24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24]
+; AVX512VLCD-NEXT: vpsubb %xmm2, %xmm1, %xmm1
+; AVX512VLCD-NEXT: vpmovzxbd %xmm0, %zmm0
+; AVX512VLCD-NEXT: vplzcntd %zmm0, %zmm0
+; AVX512VLCD-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512VLCD-NEXT: vpsubb %xmm2, %xmm0, %xmm0
+; AVX512VLCD-NEXT: vinserti32x4 $1, %xmm1, %ymm0, %ymm0
+; AVX512VLCD-NEXT: retq
+;
+; AVX512CD-LABEL: testv32i8:
+; AVX512CD: ## BB#0:
+; AVX512CD-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX512CD-NEXT: vpmovzxbd %xmm1, %zmm1
+; AVX512CD-NEXT: vplzcntd %zmm1, %zmm1
+; AVX512CD-NEXT: vpmovdb %zmm1, %xmm1
+; AVX512CD-NEXT: vmovdqa {{.*#+}} xmm2 = [24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24]
+; AVX512CD-NEXT: vpsubb %xmm2, %xmm1, %xmm1
+; AVX512CD-NEXT: vpmovzxbd %xmm0, %zmm0
+; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
+; AVX512CD-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512CD-NEXT: vpsubb %xmm2, %xmm0, %xmm0
+; AVX512CD-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512CD-NEXT: retq
%out = call <32 x i8> @llvm.ctlz.v32i8(<32 x i8> %in, i1 0)
ret <32 x i8> %out
}
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
-; AVX512-LABEL: testv32i8u:
-; AVX512: ## BB#0:
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpextrb $1, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpextrb $0, %xmm1, %ecx
-; AVX512-NEXT: lzcntl %ecx, %ecx
-; AVX512-NEXT: addl $-24, %ecx
-; AVX512-NEXT: vmovd %ecx, %xmm2
-; AVX512-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $2, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $3, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $4, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $5, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $6, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $7, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $8, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $9, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $10, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $11, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $12, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $13, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $14, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $15, %xmm1, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $15, %eax, %xmm2, %xmm1
-; AVX512-NEXT: vpextrb $1, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpextrb $0, %xmm0, %ecx
-; AVX512-NEXT: lzcntl %ecx, %ecx
-; AVX512-NEXT: addl $-24, %ecx
-; AVX512-NEXT: vmovd %ecx, %xmm2
-; AVX512-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $2, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $3, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $4, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $5, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $6, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $7, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $8, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $9, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $10, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $11, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $12, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $13, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $14, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vpextrb $15, %xmm0, %eax
-; AVX512-NEXT: lzcntl %eax, %eax
-; AVX512-NEXT: addl $-24, %eax
-; AVX512-NEXT: vpinsrb $15, %eax, %xmm2, %xmm0
-; AVX512-NEXT: vinserti32x4 $1, %xmm1, %ymm0, %ymm0
-; AVX512-NEXT: retq
+; AVX512VLCD-LABEL: testv32i8u:
+; AVX512VLCD: ## BB#0:
+; AVX512VLCD-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX512VLCD-NEXT: vpmovzxbd %xmm1, %zmm1
+; AVX512VLCD-NEXT: vplzcntd %zmm1, %zmm1
+; AVX512VLCD-NEXT: vpmovdb %zmm1, %xmm1
+; AVX512VLCD-NEXT: vmovdqa64 {{.*#+}} xmm2 = [24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24]
+; AVX512VLCD-NEXT: vpsubb %xmm2, %xmm1, %xmm1
+; AVX512VLCD-NEXT: vpmovzxbd %xmm0, %zmm0
+; AVX512VLCD-NEXT: vplzcntd %zmm0, %zmm0
+; AVX512VLCD-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512VLCD-NEXT: vpsubb %xmm2, %xmm0, %xmm0
+; AVX512VLCD-NEXT: vinserti32x4 $1, %xmm1, %ymm0, %ymm0
+; AVX512VLCD-NEXT: retq
+;
+; AVX512CD-LABEL: testv32i8u:
+; AVX512CD: ## BB#0:
+; AVX512CD-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX512CD-NEXT: vpmovzxbd %xmm1, %zmm1
+; AVX512CD-NEXT: vplzcntd %zmm1, %zmm1
+; AVX512CD-NEXT: vpmovdb %zmm1, %xmm1
+; AVX512CD-NEXT: vmovdqa {{.*#+}} xmm2 = [24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24]
+; AVX512CD-NEXT: vpsubb %xmm2, %xmm1, %xmm1
+; AVX512CD-NEXT: vpmovzxbd %xmm0, %zmm0
+; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
+; AVX512CD-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512CD-NEXT: vpsubb %xmm2, %xmm0, %xmm0
+; AVX512CD-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512CD-NEXT: retq
%out = call <32 x i8> @llvm.ctlz.v32i8(<32 x i8> %in, i1 -1)
ret <32 x i8> %out
}
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [55,0,64,56]
; AVX-NEXT: retq
;
-; AVX512-LABEL: foldv4i64:
-; AVX512: ## BB#0:
-; AVX512-NEXT: vmovdqa64 {{.*}}(%rip), %ymm0
-; AVX512-NEXT: retq
+; AVX512VLCD-LABEL: foldv4i64:
+; AVX512VLCD: ## BB#0:
+; AVX512VLCD-NEXT: vmovdqa64 {{.*#+}} ymm0 = [55,0,64,56]
+; AVX512VLCD-NEXT: retq
+;
+; AVX512CD-LABEL: foldv4i64:
+; AVX512CD: ## BB#0:
+; AVX512CD-NEXT: vmovaps {{.*#+}} ymm0 = [55,0,64,56]
+; AVX512CD-NEXT: retq
%out = call <4 x i64> @llvm.ctlz.v4i64(<4 x i64> <i64 256, i64 -1, i64 0, i64 255>, i1 0)
ret <4 x i64> %out
}
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [55,0,64,56]
; AVX-NEXT: retq
;
-; AVX512-LABEL: foldv4i64u:
-; AVX512: ## BB#0:
-; AVX512-NEXT: vmovdqa64 {{.*}}(%rip), %ymm0
-; AVX512-NEXT: retq
+; AVX512VLCD-LABEL: foldv4i64u:
+; AVX512VLCD: ## BB#0:
+; AVX512VLCD-NEXT: vmovdqa64 {{.*#+}} ymm0 = [55,0,64,56]
+; AVX512VLCD-NEXT: retq
+;
+; AVX512CD-LABEL: foldv4i64u:
+; AVX512CD: ## BB#0:
+; AVX512CD-NEXT: vmovaps {{.*#+}} ymm0 = [55,0,64,56]
+; AVX512CD-NEXT: retq
%out = call <4 x i64> @llvm.ctlz.v4i64(<4 x i64> <i64 256, i64 -1, i64 0, i64 255>, i1 -1)
ret <4 x i64> %out
}
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [23,0,32,24,0,29,27,25]
; AVX-NEXT: retq
;
-; AVX512-LABEL: foldv8i32:
-; AVX512: ## BB#0:
-; AVX512-NEXT: vmovdqa32 {{.*}}(%rip), %ymm0
-; AVX512-NEXT: retq
+; AVX512VLCD-LABEL: foldv8i32:
+; AVX512VLCD: ## BB#0:
+; AVX512VLCD-NEXT: vmovdqa32 {{.*#+}} ymm0 = [23,0,32,24,0,29,27,25]
+; AVX512VLCD-NEXT: retq
+;
+; AVX512CD-LABEL: foldv8i32:
+; AVX512CD: ## BB#0:
+; AVX512CD-NEXT: vmovaps {{.*#+}} ymm0 = [23,0,32,24,0,29,27,25]
+; AVX512CD-NEXT: retq
%out = call <8 x i32> @llvm.ctlz.v8i32(<8 x i32> <i32 256, i32 -1, i32 0, i32 255, i32 -65536, i32 7, i32 24, i32 88>, i1 0)
ret <8 x i32> %out
}
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [23,0,32,24,0,29,27,25]
; AVX-NEXT: retq
;
-; AVX512-LABEL: foldv8i32u:
-; AVX512: ## BB#0:
-; AVX512-NEXT: vmovdqa32 {{.*}}(%rip), %ymm0
-; AVX512-NEXT: retq
+; AVX512VLCD-LABEL: foldv8i32u:
+; AVX512VLCD: ## BB#0:
+; AVX512VLCD-NEXT: vmovdqa32 {{.*#+}} ymm0 = [23,0,32,24,0,29,27,25]
+; AVX512VLCD-NEXT: retq
+;
+; AVX512CD-LABEL: foldv8i32u:
+; AVX512CD: ## BB#0:
+; AVX512CD-NEXT: vmovaps {{.*#+}} ymm0 = [23,0,32,24,0,29,27,25]
+; AVX512CD-NEXT: retq
%out = call <8 x i32> @llvm.ctlz.v8i32(<8 x i32> <i32 256, i32 -1, i32 0, i32 255, i32 -65536, i32 7, i32 24, i32 88>, i1 -1)
ret <8 x i32> %out
}
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [7,0,16,8,16,13,11,9,0,8,15,14,13,12,11,10]
; AVX-NEXT: retq
;
-; AVX512-LABEL: foldv16i16:
-; AVX512: ## BB#0:
-; AVX512-NEXT: vmovdqa64 {{.*}}(%rip), %ymm0
-; AVX512-NEXT: retq
+; AVX512VLCD-LABEL: foldv16i16:
+; AVX512VLCD: ## BB#0:
+; AVX512VLCD-NEXT: vmovdqa64 {{.*#+}} ymm0 = [7,0,16,8,16,13,11,9,0,8,15,14,13,12,11,10]
+; AVX512VLCD-NEXT: retq
+;
+; AVX512CD-LABEL: foldv16i16:
+; AVX512CD: ## BB#0:
+; AVX512CD-NEXT: vmovaps {{.*#+}} ymm0 = [7,0,16,8,16,13,11,9,0,8,15,14,13,12,11,10]
+; AVX512CD-NEXT: retq
%out = call <16 x i16> @llvm.ctlz.v16i16(<16 x i16> <i16 256, i16 -1, i16 0, i16 255, i16 -65536, i16 7, i16 24, i16 88, i16 -2, i16 254, i16 1, i16 2, i16 4, i16 8, i16 16, i16 32>, i1 0)
ret <16 x i16> %out
}
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [7,0,16,8,16,13,11,9,0,8,15,14,13,12,11,10]
; AVX-NEXT: retq
;
-; AVX512-LABEL: foldv16i16u:
-; AVX512: ## BB#0:
-; AVX512-NEXT: vmovdqa64 {{.*}}(%rip), %ymm0
-; AVX512-NEXT: retq
+; AVX512VLCD-LABEL: foldv16i16u:
+; AVX512VLCD: ## BB#0:
+; AVX512VLCD-NEXT: vmovdqa64 {{.*#+}} ymm0 = [7,0,16,8,16,13,11,9,0,8,15,14,13,12,11,10]
+; AVX512VLCD-NEXT: retq
+;
+; AVX512CD-LABEL: foldv16i16u:
+; AVX512CD: ## BB#0:
+; AVX512CD-NEXT: vmovaps {{.*#+}} ymm0 = [7,0,16,8,16,13,11,9,0,8,15,14,13,12,11,10]
+; AVX512CD-NEXT: retq
%out = call <16 x i16> @llvm.ctlz.v16i16(<16 x i16> <i16 256, i16 -1, i16 0, i16 255, i16 -65536, i16 7, i16 24, i16 88, i16 -2, i16 254, i16 1, i16 2, i16 4, i16 8, i16 16, i16 32>, i1 -1)
ret <16 x i16> %out
}
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2,1,0,8,8,0,0,0,0,0,0,0,0,6,5,5,1]
; AVX-NEXT: retq
;
-; AVX512-LABEL: foldv32i8:
-; AVX512: ## BB#0:
-; AVX512-NEXT: vmovdqa64 {{.*}}(%rip), %ymm0
-; AVX512-NEXT: retq
+; AVX512VLCD-LABEL: foldv32i8:
+; AVX512VLCD: ## BB#0:
+; AVX512VLCD-NEXT: vmovdqa64 {{.*#+}} ymm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2,1,0,8,8,0,0,0,0,0,0,0,0,6,5,5,1]
+; AVX512VLCD-NEXT: retq
+;
+; AVX512CD-LABEL: foldv32i8:
+; AVX512CD: ## BB#0:
+; AVX512CD-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2,1,0,8,8,0,0,0,0,0,0,0,0,6,5,5,1]
+; AVX512CD-NEXT: retq
%out = call <32 x i8> @llvm.ctlz.v32i8(<32 x i8> <i8 256, i8 -1, i8 0, i8 255, i8 -65536, i8 7, i8 24, i8 88, i8 -2, i8 254, i8 1, i8 2, i8 4, i8 8, i8 16, i8 32, i8 64, i8 128, i8 256, i8 -256, i8 -128, i8 -64, i8 -32, i8 -16, i8 -8, i8 -4, i8 -2, i8 -1, i8 3, i8 5, i8 7, i8 127>, i1 0)
ret <32 x i8> %out
}
; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2,1,0,8,8,0,0,0,0,0,0,0,0,6,5,5,1]
; AVX-NEXT: retq
;
-; AVX512-LABEL: foldv32i8u:
-; AVX512: ## BB#0:
-; AVX512-NEXT: vmovdqa64 {{.*}}(%rip), %ymm0
-; AVX512-NEXT: retq
+; AVX512VLCD-LABEL: foldv32i8u:
+; AVX512VLCD: ## BB#0:
+; AVX512VLCD-NEXT: vmovdqa64 {{.*#+}} ymm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2,1,0,8,8,0,0,0,0,0,0,0,0,6,5,5,1]
+; AVX512VLCD-NEXT: retq
+;
+; AVX512CD-LABEL: foldv32i8u:
+; AVX512CD: ## BB#0:
+; AVX512CD-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2,1,0,8,8,0,0,0,0,0,0,0,0,6,5,5,1]
+; AVX512CD-NEXT: retq
%out = call <32 x i8> @llvm.ctlz.v32i8(<32 x i8> <i8 256, i8 -1, i8 0, i8 255, i8 -65536, i8 7, i8 24, i8 88, i8 -2, i8 254, i8 1, i8 2, i8 4, i8 8, i8 16, i8 32, i8 64, i8 128, i8 256, i8 -256, i8 -128, i8 -64, i8 -32, i8 -16, i8 -8, i8 -4, i8 -2, i8 -1, i8 3, i8 5, i8 7, i8 127>, i1 -1)
ret <32 x i8> %out
}
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512cd | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512CD
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512bw | FileCheck %s --check-prefix=AVX512BW
define <8 x i64> @testv8i64(<8 x i64> %in) nounwind {
; ALL-LABEL: testv8i64:
define <32 x i16> @testv32i16(<32 x i16> %in) nounwind {
; ALL-LABEL: testv32i16:
; ALL: ## BB#0:
-; ALL-NEXT: vextracti128 $1, %ymm0, %xmm2
-; ALL-NEXT: vpextrw $1, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vmovd %xmm2, %ecx
-; ALL-NEXT: lzcntw %cx, %cx
-; ALL-NEXT: vmovd %ecx, %xmm3
-; ALL-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $2, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $3, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $3, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $4, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $5, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $5, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $6, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $6, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $7, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $7, %eax, %xmm3, %xmm2
-; ALL-NEXT: vpextrw $1, %xmm0, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vmovd %xmm0, %ecx
-; ALL-NEXT: lzcntw %cx, %cx
-; ALL-NEXT: vmovd %ecx, %xmm3
-; ALL-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $2, %xmm0, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $3, %xmm0, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $3, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $4, %xmm0, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $5, %xmm0, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $5, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $6, %xmm0, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $6, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $7, %xmm0, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $7, %eax, %xmm3, %xmm0
-; ALL-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
-; ALL-NEXT: vextracti128 $1, %ymm1, %xmm2
-; ALL-NEXT: vpextrw $1, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vmovd %xmm2, %ecx
-; ALL-NEXT: lzcntw %cx, %cx
-; ALL-NEXT: vmovd %ecx, %xmm3
-; ALL-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $2, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $3, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $3, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $4, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $5, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $5, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $6, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $6, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $7, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $7, %eax, %xmm3, %xmm2
-; ALL-NEXT: vpextrw $1, %xmm1, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vmovd %xmm1, %ecx
-; ALL-NEXT: lzcntw %cx, %cx
-; ALL-NEXT: vmovd %ecx, %xmm3
-; ALL-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $2, %xmm1, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $3, %xmm1, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $3, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $4, %xmm1, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $5, %xmm1, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $5, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $6, %xmm1, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $6, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $7, %xmm1, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $7, %eax, %xmm3, %xmm1
-; ALL-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
+; ALL-NEXT: vpmovzxwd %ymm0, %zmm0
+; ALL-NEXT: vplzcntd %zmm0, %zmm0
+; ALL-NEXT: vpmovdw %zmm0, %ymm0
+; ALL-NEXT: vmovdqa {{.*#+}} ymm2 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; ALL-NEXT: vpsubw %ymm2, %ymm0, %ymm0
+; ALL-NEXT: vpmovzxwd %ymm1, %zmm1
+; ALL-NEXT: vplzcntd %zmm1, %zmm1
+; ALL-NEXT: vpmovdw %zmm1, %ymm1
+; ALL-NEXT: vpsubw %ymm2, %ymm1, %ymm1
; ALL-NEXT: retq
+;
+; AVX512BW-LABEL: testv32i16:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512BW-NEXT: vpmovzxwd %ymm1, %zmm1
+; AVX512BW-NEXT: vplzcntd %zmm1, %zmm1
+; AVX512BW-NEXT: vpmovdw %zmm1, %ymm1
+; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512BW-NEXT: vpsubw %ymm2, %ymm1, %ymm1
+; AVX512BW-NEXT: vpmovzxwd %ymm0, %zmm0
+; AVX512BW-NEXT: vplzcntd %zmm0, %zmm0
+; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512BW-NEXT: vpsubw %ymm2, %ymm0, %ymm0
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512BW-NEXT: retq
%out = call <32 x i16> @llvm.ctlz.v32i16(<32 x i16> %in, i1 0)
ret <32 x i16> %out
}
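The new ALL sequence replaces 32 scalar lzcntw round-trips with one CDI-based expansion per 256-bit half: zero-extend the i16 lanes to i32 (vpmovzxwd), count with vplzcntd, truncate back (vpmovdw), then subtract 16, because the zero-extension contributes exactly 16 extra leading zeros per element. A minimal, self-contained IR sketch of the expansion for one half (function and value names are illustrative, not from the patch):

declare <16 x i32> @llvm.ctlz.v16i32(<16 x i32>, i1)

define <16 x i16> @ctlz_half_v16i16(<16 x i16> %half) {
  %wide = zext <16 x i16> %half to <16 x i32>                            ; vpmovzxwd
  %lz32 = call <16 x i32> @llvm.ctlz.v16i32(<16 x i32> %wide, i1 false)  ; vplzcntd
  %lz16 = trunc <16 x i32> %lz32 to <16 x i16>                           ; vpmovdw
  ; the zext added exactly 16 leading zero bits per element, so bias by 16
  %res = sub <16 x i16> %lz16, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>
  ret <16 x i16> %res
}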
define <32 x i16> @testv32i16u(<32 x i16> %in) nounwind {
; ALL-LABEL: testv32i16u:
; ALL: ## BB#0:
-; ALL-NEXT: vextracti128 $1, %ymm0, %xmm2
-; ALL-NEXT: vpextrw $1, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vmovd %xmm2, %ecx
-; ALL-NEXT: lzcntw %cx, %cx
-; ALL-NEXT: vmovd %ecx, %xmm3
-; ALL-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $2, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $3, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $3, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $4, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $5, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $5, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $6, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $6, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $7, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $7, %eax, %xmm3, %xmm2
-; ALL-NEXT: vpextrw $1, %xmm0, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vmovd %xmm0, %ecx
-; ALL-NEXT: lzcntw %cx, %cx
-; ALL-NEXT: vmovd %ecx, %xmm3
-; ALL-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $2, %xmm0, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $3, %xmm0, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $3, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $4, %xmm0, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $5, %xmm0, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $5, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $6, %xmm0, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $6, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $7, %xmm0, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $7, %eax, %xmm3, %xmm0
-; ALL-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
-; ALL-NEXT: vextracti128 $1, %ymm1, %xmm2
-; ALL-NEXT: vpextrw $1, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vmovd %xmm2, %ecx
-; ALL-NEXT: lzcntw %cx, %cx
-; ALL-NEXT: vmovd %ecx, %xmm3
-; ALL-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $2, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $3, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $3, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $4, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $5, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $5, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $6, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $6, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $7, %xmm2, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $7, %eax, %xmm3, %xmm2
-; ALL-NEXT: vpextrw $1, %xmm1, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vmovd %xmm1, %ecx
-; ALL-NEXT: lzcntw %cx, %cx
-; ALL-NEXT: vmovd %ecx, %xmm3
-; ALL-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $2, %xmm1, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $3, %xmm1, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $3, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $4, %xmm1, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $5, %xmm1, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $5, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $6, %xmm1, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $6, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrw $7, %xmm1, %eax
-; ALL-NEXT: lzcntw %ax, %ax
-; ALL-NEXT: vpinsrw $7, %eax, %xmm3, %xmm1
-; ALL-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
+; ALL-NEXT: vpmovzxwd %ymm0, %zmm0
+; ALL-NEXT: vplzcntd %zmm0, %zmm0
+; ALL-NEXT: vpmovdw %zmm0, %ymm0
+; ALL-NEXT: vmovdqa {{.*#+}} ymm2 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; ALL-NEXT: vpsubw %ymm2, %ymm0, %ymm0
+; ALL-NEXT: vpmovzxwd %ymm1, %zmm1
+; ALL-NEXT: vplzcntd %zmm1, %zmm1
+; ALL-NEXT: vpmovdw %zmm1, %ymm1
+; ALL-NEXT: vpsubw %ymm2, %ymm1, %ymm1
; ALL-NEXT: retq
+;
+; AVX512BW-LABEL: testv32i16u:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512BW-NEXT: vpmovzxwd %ymm1, %zmm1
+; AVX512BW-NEXT: vplzcntd %zmm1, %zmm1
+; AVX512BW-NEXT: vpmovdw %zmm1, %ymm1
+; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512BW-NEXT: vpsubw %ymm2, %ymm1, %ymm1
+; AVX512BW-NEXT: vpmovzxwd %ymm0, %zmm0
+; AVX512BW-NEXT: vplzcntd %zmm0, %zmm0
+; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512BW-NEXT: vpsubw %ymm2, %ymm0, %ymm0
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512BW-NEXT: retq
%out = call <32 x i16> @llvm.ctlz.v32i16(<32 x i16> %in, i1 -1)
ret <32 x i16> %out
}
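The -u variant passes i1 -1 (result undefined for a zero input), yet the emitted code is identical to testv32i16: vplzcntd already defines ctlz(0) as the element width, so for x == 0 the identity still yields 32 - 16 == 16, and there is no cheaper sequence for the undef-at-zero case to exploit.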
define <64 x i8> @testv64i8(<64 x i8> %in) nounwind {
; ALL-LABEL: testv64i8:
; ALL: ## BB#0:
-; ALL-NEXT: vextracti128 $1, %ymm0, %xmm2
-; ALL-NEXT: vpextrb $1, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpextrb $0, %xmm2, %ecx
-; ALL-NEXT: lzcntl %ecx, %ecx
-; ALL-NEXT: addl $-24, %ecx
-; ALL-NEXT: vmovd %ecx, %xmm3
-; ALL-NEXT: vpinsrb $1, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $2, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $2, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $3, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $3, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $4, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $4, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $5, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $5, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $6, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $6, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $7, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $7, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $8, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $8, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $9, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $9, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $10, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $10, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $11, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $11, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $12, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $12, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $13, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $13, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $14, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $14, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $15, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $15, %eax, %xmm3, %xmm2
-; ALL-NEXT: vpextrb $1, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpextrb $0, %xmm0, %ecx
-; ALL-NEXT: lzcntl %ecx, %ecx
-; ALL-NEXT: addl $-24, %ecx
-; ALL-NEXT: vmovd %ecx, %xmm3
-; ALL-NEXT: vpinsrb $1, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $2, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $2, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $3, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $3, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $4, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $4, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $5, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $5, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $6, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $6, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $7, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $7, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $8, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $8, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $9, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $9, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $10, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $10, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $11, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $11, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $12, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $12, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $13, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $13, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $14, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $14, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $15, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $15, %eax, %xmm3, %xmm0
+; ALL-NEXT: vextractf128 $1, %ymm0, %xmm2
+; ALL-NEXT: vpmovzxbd %xmm2, %zmm2
+; ALL-NEXT: vplzcntd %zmm2, %zmm2
+; ALL-NEXT: vpmovdb %zmm2, %xmm2
+; ALL-NEXT: vmovdqa {{.*#+}} xmm3 = [24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24]
+; ALL-NEXT: vpsubb %xmm3, %xmm2, %xmm2
+; ALL-NEXT: vpmovzxbd %xmm0, %zmm0
+; ALL-NEXT: vplzcntd %zmm0, %zmm0
+; ALL-NEXT: vpmovdb %zmm0, %xmm0
+; ALL-NEXT: vpsubb %xmm3, %xmm0, %xmm0
; ALL-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
-; ALL-NEXT: vextracti128 $1, %ymm1, %xmm2
-; ALL-NEXT: vpextrb $1, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpextrb $0, %xmm2, %ecx
-; ALL-NEXT: lzcntl %ecx, %ecx
-; ALL-NEXT: addl $-24, %ecx
-; ALL-NEXT: vmovd %ecx, %xmm3
-; ALL-NEXT: vpinsrb $1, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $2, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $2, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $3, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $3, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $4, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $4, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $5, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $5, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $6, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $6, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $7, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $7, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $8, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $8, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $9, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $9, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $10, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $10, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $11, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $11, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $12, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $12, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $13, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $13, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $14, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $14, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $15, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $15, %eax, %xmm3, %xmm2
-; ALL-NEXT: vpextrb $1, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpextrb $0, %xmm1, %ecx
-; ALL-NEXT: lzcntl %ecx, %ecx
-; ALL-NEXT: addl $-24, %ecx
-; ALL-NEXT: vmovd %ecx, %xmm3
-; ALL-NEXT: vpinsrb $1, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $2, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $2, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $3, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $3, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $4, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $4, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $5, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $5, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $6, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $6, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $7, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $7, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $8, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $8, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $9, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $9, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $10, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $10, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $11, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $11, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $12, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $12, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $13, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $13, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $14, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $14, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $15, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $15, %eax, %xmm3, %xmm1
+; ALL-NEXT: vextractf128 $1, %ymm1, %xmm2
+; ALL-NEXT: vpmovzxbd %xmm2, %zmm2
+; ALL-NEXT: vplzcntd %zmm2, %zmm2
+; ALL-NEXT: vpmovdb %zmm2, %xmm2
+; ALL-NEXT: vpsubb %xmm3, %xmm2, %xmm2
+; ALL-NEXT: vpmovzxbd %xmm1, %zmm1
+; ALL-NEXT: vplzcntd %zmm1, %zmm1
+; ALL-NEXT: vpmovdb %zmm1, %xmm1
+; ALL-NEXT: vpsubb %xmm3, %xmm1, %xmm1
; ALL-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
; ALL-NEXT: retq
+;
+; AVX512BW-LABEL: testv64i8:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512BW-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX512BW-NEXT: vpmovzxbd %xmm2, %zmm2
+; AVX512BW-NEXT: vplzcntd %zmm2, %zmm2
+; AVX512BW-NEXT: vpmovdb %zmm2, %xmm2
+; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm3 = [24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24]
+; AVX512BW-NEXT: vpsubb %xmm3, %xmm2, %xmm2
+; AVX512BW-NEXT: vpmovzxbd %xmm1, %zmm1
+; AVX512BW-NEXT: vplzcntd %zmm1, %zmm1
+; AVX512BW-NEXT: vpmovdb %zmm1, %xmm1
+; AVX512BW-NEXT: vpsubb %xmm3, %xmm1, %xmm1
+; AVX512BW-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
+; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX512BW-NEXT: vpmovzxbd %xmm2, %zmm2
+; AVX512BW-NEXT: vplzcntd %zmm2, %zmm2
+; AVX512BW-NEXT: vpmovdb %zmm2, %xmm2
+; AVX512BW-NEXT: vpsubb %xmm3, %xmm2, %xmm2
+; AVX512BW-NEXT: vpmovzxbd %xmm0, %zmm0
+; AVX512BW-NEXT: vplzcntd %zmm0, %zmm0
+; AVX512BW-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512BW-NEXT: vpsubb %xmm3, %xmm0, %xmm0
+; AVX512BW-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512BW-NEXT: retq
%out = call <64 x i8> @llvm.ctlz.v64i8(<64 x i8> %in, i1 0)
ret <64 x i8> %out
}
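The byte version follows the same shape as the v16i16 sketch above, but per 128-bit chunk, since vpmovzxbd widens only 16 bytes into a zmm: ctlz.i8(x) == ctlz.i32(zext x) - 24, which also holds at zero (32 - 24 == 8). Per input vector this replaces 64 scalar lzcntl/addl pairs with four widen/count/narrow round-trips.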
define <64 x i8> @testv64i8u(<64 x i8> %in) nounwind {
; ALL-LABEL: testv64i8u:
; ALL: ## BB#0:
-; ALL-NEXT: vextracti128 $1, %ymm0, %xmm2
-; ALL-NEXT: vpextrb $1, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpextrb $0, %xmm2, %ecx
-; ALL-NEXT: lzcntl %ecx, %ecx
-; ALL-NEXT: addl $-24, %ecx
-; ALL-NEXT: vmovd %ecx, %xmm3
-; ALL-NEXT: vpinsrb $1, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $2, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $2, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $3, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $3, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $4, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $4, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $5, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $5, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $6, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $6, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $7, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $7, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $8, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $8, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $9, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $9, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $10, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $10, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $11, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $11, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $12, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $12, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $13, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $13, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $14, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $14, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $15, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $15, %eax, %xmm3, %xmm2
-; ALL-NEXT: vpextrb $1, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpextrb $0, %xmm0, %ecx
-; ALL-NEXT: lzcntl %ecx, %ecx
-; ALL-NEXT: addl $-24, %ecx
-; ALL-NEXT: vmovd %ecx, %xmm3
-; ALL-NEXT: vpinsrb $1, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $2, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $2, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $3, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $3, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $4, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $4, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $5, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $5, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $6, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $6, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $7, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $7, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $8, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $8, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $9, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $9, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $10, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $10, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $11, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $11, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $12, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $12, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $13, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $13, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $14, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $14, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $15, %xmm0, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $15, %eax, %xmm3, %xmm0
+; ALL-NEXT: vextractf128 $1, %ymm0, %xmm2
+; ALL-NEXT: vpmovzxbd %xmm2, %zmm2
+; ALL-NEXT: vplzcntd %zmm2, %zmm2
+; ALL-NEXT: vpmovdb %zmm2, %xmm2
+; ALL-NEXT: vmovdqa {{.*#+}} xmm3 = [24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24]
+; ALL-NEXT: vpsubb %xmm3, %xmm2, %xmm2
+; ALL-NEXT: vpmovzxbd %xmm0, %zmm0
+; ALL-NEXT: vplzcntd %zmm0, %zmm0
+; ALL-NEXT: vpmovdb %zmm0, %xmm0
+; ALL-NEXT: vpsubb %xmm3, %xmm0, %xmm0
; ALL-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
-; ALL-NEXT: vextracti128 $1, %ymm1, %xmm2
-; ALL-NEXT: vpextrb $1, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpextrb $0, %xmm2, %ecx
-; ALL-NEXT: lzcntl %ecx, %ecx
-; ALL-NEXT: addl $-24, %ecx
-; ALL-NEXT: vmovd %ecx, %xmm3
-; ALL-NEXT: vpinsrb $1, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $2, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $2, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $3, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $3, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $4, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $4, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $5, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $5, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $6, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $6, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $7, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $7, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $8, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $8, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $9, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $9, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $10, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $10, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $11, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $11, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $12, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $12, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $13, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $13, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $14, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $14, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $15, %xmm2, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $15, %eax, %xmm3, %xmm2
-; ALL-NEXT: vpextrb $1, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpextrb $0, %xmm1, %ecx
-; ALL-NEXT: lzcntl %ecx, %ecx
-; ALL-NEXT: addl $-24, %ecx
-; ALL-NEXT: vmovd %ecx, %xmm3
-; ALL-NEXT: vpinsrb $1, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $2, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $2, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $3, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $3, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $4, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $4, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $5, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $5, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $6, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $6, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $7, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $7, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $8, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $8, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $9, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $9, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $10, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $10, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $11, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $11, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $12, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $12, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $13, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $13, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $14, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $14, %eax, %xmm3, %xmm3
-; ALL-NEXT: vpextrb $15, %xmm1, %eax
-; ALL-NEXT: lzcntl %eax, %eax
-; ALL-NEXT: addl $-24, %eax
-; ALL-NEXT: vpinsrb $15, %eax, %xmm3, %xmm1
+; ALL-NEXT: vextractf128 $1, %ymm1, %xmm2
+; ALL-NEXT: vpmovzxbd %xmm2, %zmm2
+; ALL-NEXT: vplzcntd %zmm2, %zmm2
+; ALL-NEXT: vpmovdb %zmm2, %xmm2
+; ALL-NEXT: vpsubb %xmm3, %xmm2, %xmm2
+; ALL-NEXT: vpmovzxbd %xmm1, %zmm1
+; ALL-NEXT: vplzcntd %zmm1, %zmm1
+; ALL-NEXT: vpmovdb %zmm1, %xmm1
+; ALL-NEXT: vpsubb %xmm3, %xmm1, %xmm1
; ALL-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
; ALL-NEXT: retq
+;
+; AVX512BW-LABEL: testv64i8u:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512BW-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX512BW-NEXT: vpmovzxbd %xmm2, %zmm2
+; AVX512BW-NEXT: vplzcntd %zmm2, %zmm2
+; AVX512BW-NEXT: vpmovdb %zmm2, %xmm2
+; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm3 = [24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24]
+; AVX512BW-NEXT: vpsubb %xmm3, %xmm2, %xmm2
+; AVX512BW-NEXT: vpmovzxbd %xmm1, %zmm1
+; AVX512BW-NEXT: vplzcntd %zmm1, %zmm1
+; AVX512BW-NEXT: vpmovdb %zmm1, %xmm1
+; AVX512BW-NEXT: vpsubb %xmm3, %xmm1, %xmm1
+; AVX512BW-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
+; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX512BW-NEXT: vpmovzxbd %xmm2, %zmm2
+; AVX512BW-NEXT: vplzcntd %zmm2, %zmm2
+; AVX512BW-NEXT: vpmovdb %zmm2, %xmm2
+; AVX512BW-NEXT: vpsubb %xmm3, %xmm2, %xmm2
+; AVX512BW-NEXT: vpmovzxbd %xmm0, %zmm0
+; AVX512BW-NEXT: vplzcntd %zmm0, %zmm0
+; AVX512BW-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512BW-NEXT: vpsubb %xmm3, %xmm0, %xmm0
+; AVX512BW-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512BW-NEXT: retq
%out = call <64 x i8> @llvm.ctlz.v64i8(<64 x i8> %in, i1 -1)
ret <64 x i8> %out
}
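Under AVX512BW the arguments arrive in zmm registers, so the AVX512BW blocks bracket the same per-half expansion with vextracti64x4/vinserti64x4; the v64i8 case additionally splits each ymm half with vextracti128, for four vplzcntd uses per vector. BWI itself has no native word/byte leading-zero count, so this CDI-based route is what avoids full scalarization for these types.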