From: Simon Pilgrim
Date: Mon, 8 Jun 2015 09:57:09 +0000 (+0000)
Subject: [DAGCombiner] Added CTTZ vector constant folding support.
X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=d72b35710738a3655a3cea808e71659d8ec9d82b;p=oota-llvm.git

[DAGCombiner] Added CTTZ vector constant folding support.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@239293 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 307bfa502b7..4b9167723f4 100644
--- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -4789,7 +4789,7 @@ SDValue DAGCombiner::visitCTTZ(SDNode *N) {
   EVT VT = N->getValueType(0);
 
   // fold (cttz c1) -> c2
-  if (isa<ConstantSDNode>(N0))
+  if (isConstantIntBuildVectorOrConstantInt(N0))
     return DAG.getNode(ISD::CTTZ, SDLoc(N), VT, N0);
   return SDValue();
 }
@@ -4799,7 +4799,7 @@ SDValue DAGCombiner::visitCTTZ_ZERO_UNDEF(SDNode *N) {
   EVT VT = N->getValueType(0);
 
   // fold (cttz_zero_undef c1) -> c2
-  if (isa<ConstantSDNode>(N0))
+  if (isConstantIntBuildVectorOrConstantInt(N0))
     return DAG.getNode(ISD::CTTZ_ZERO_UNDEF, SDLoc(N), VT, N0);
   return SDValue();
 }
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index f72bfc6ceaa..92b6a0029b4 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -2911,6 +2911,8 @@ SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL,
     case ISD::TRUNCATE:
     case ISD::UINT_TO_FP:
     case ISD::SINT_TO_FP:
+    case ISD::CTTZ:
+    case ISD::CTTZ_ZERO_UNDEF:
     case ISD::CTPOP: {
       EVT SVT = VT.getScalarType();
       EVT InVT = BV->getValueType(0);
diff --git a/test/CodeGen/X86/vector-tzcnt-128.ll b/test/CodeGen/X86/vector-tzcnt-128.ll
index bfa398b011c..422fe052d38 100644
--- a/test/CodeGen/X86/vector-tzcnt-128.ll
+++ b/test/CodeGen/X86/vector-tzcnt-128.ll
@@ -1666,6 +1666,122 @@ define <16 x i8> @testv16i8u(<16 x i8> %in) {
   ret <16 x i8> %out
 }
 
+define <2 x i64> @foldv2i64() {
+; SSE-LABEL: foldv2i64:
+; SSE:         # BB#0:
+; SSE-NEXT:    movl $8, %eax
+; SSE-NEXT:    movd %rax, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: foldv2i64:
+; AVX:         # BB#0:
+; AVX-NEXT:    movl $8, %eax
+; AVX-NEXT:    vmovq %rax, %xmm0
+; AVX-NEXT:    retq
+  %out = call <2 x i64> @llvm.cttz.v2i64(<2 x i64> <i64 256, i64 -1>, i1 0)
+  ret <2 x i64> %out
+}
+
+define <2 x i64> @foldv2i64u() {
+; SSE-LABEL: foldv2i64u:
+; SSE:         # BB#0:
+; SSE-NEXT:    movl $8, %eax
+; SSE-NEXT:    movd %rax, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: foldv2i64u:
+; AVX:         # BB#0:
+; AVX-NEXT:    movl $8, %eax
+; AVX-NEXT:    vmovq %rax, %xmm0
+; AVX-NEXT:    retq
+  %out = call <2 x i64> @llvm.cttz.v2i64(<2 x i64> <i64 256, i64 -1>, i1 -1)
+  ret <2 x i64> %out
+}
+
+define <4 x i32> @foldv4i32() {
+; SSE-LABEL: foldv4i32:
+; SSE:         # BB#0:
+; SSE-NEXT:    movaps {{.*#+}} xmm0 = [8,0,32,0]
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: foldv4i32:
+; AVX:         # BB#0:
+; AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [8,0,32,0]
+; AVX-NEXT:    retq
+  %out = call <4 x i32> @llvm.cttz.v4i32(<4 x i32> <i32 256, i32 -1, i32 0, i32 255>, i1 0)
+  ret <4 x i32> %out
+}
+
+define <4 x i32> @foldv4i32u() {
+; SSE-LABEL: foldv4i32u:
+; SSE:         # BB#0:
+; SSE-NEXT:    movaps {{.*#+}} xmm0 = [8,0,32,0]
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: foldv4i32u:
+; AVX:         # BB#0:
+; AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [8,0,32,0]
+; AVX-NEXT:    retq
+  %out = call <4 x i32> @llvm.cttz.v4i32(<4 x i32> <i32 256, i32 -1, i32 0, i32 255>, i1 -1)
+  ret <4 x i32> %out
+}
+
+define <8 x i16> @foldv8i16() {
+; SSE-LABEL: foldv8i16:
+; SSE:         # BB#0:
+; SSE-NEXT:    movaps {{.*#+}} xmm0 = [8,0,16,0,16,0,3,3]
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: foldv8i16:
+; AVX:         # BB#0:
+; AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [8,0,16,0,16,0,3,3]
+; AVX-NEXT:    retq
+  %out = call <8 x i16> @llvm.cttz.v8i16(<8 x i16> <i16 256, i16 -1, i16 0, i16 255, i16 -65536, i16 7, i16 24, i16 88>, i1 0)
+  ret <8 x i16> %out
+}
+
+define <8 x i16> @foldv8i16u() {
+; SSE-LABEL: foldv8i16u:
+; SSE:         # BB#0:
+; SSE-NEXT:    movaps {{.*#+}} xmm0 = [8,0,16,0,16,0,3,3]
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: foldv8i16u:
+; AVX:         # BB#0:
+; AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [8,0,16,0,16,0,3,3]
+; AVX-NEXT:    retq
+  %out = call <8 x i16> @llvm.cttz.v8i16(<8 x i16> <i16 256, i16 -1, i16 0, i16 255, i16 -65536, i16 7, i16 24, i16 88>, i1 -1)
+  ret <8 x i16> %out
+}
+
+define <16 x i8> @foldv16i8() {
+; SSE-LABEL: foldv16i8:
+; SSE:         # BB#0:
+; SSE-NEXT:    movaps {{.*#+}} xmm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5]
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: foldv16i8:
+; AVX:         # BB#0:
+; AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5]
+; AVX-NEXT:    retq
+  %out = call <16 x i8> @llvm.cttz.v16i8(<16 x i8> <i8 256, i8 -1, i8 0, i8 255, i8 -65536, i8 7, i8 24, i8 88, i8 -2, i8 254, i8 1, i8 2, i8 4, i8 8, i8 16, i8 32>, i1 0)
+  ret <16 x i8> %out
+}
+
+define <16 x i8> @foldv16i8u() {
+; SSE-LABEL: foldv16i8u:
+; SSE:         # BB#0:
+; SSE-NEXT:    movaps {{.*#+}} xmm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5]
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: foldv16i8u:
+; AVX:         # BB#0:
+; AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5]
+; AVX-NEXT:    retq
+  %out = call <16 x i8> @llvm.cttz.v16i8(<16 x i8> <i8 256, i8 -1, i8 0, i8 255, i8 -65536, i8 7, i8 24, i8 88, i8 -2, i8 254, i8 1, i8 2, i8 4, i8 8, i8 16, i8 32>, i1 -1)
+  ret <16 x i8> %out
+}
+
 declare <2 x i64> @llvm.cttz.v2i64(<2 x i64>, i1)
 declare <4 x i32> @llvm.cttz.v4i32(<4 x i32>, i1)
 declare <8 x i16> @llvm.cttz.v8i16(<8 x i16>, i1)
diff --git a/test/CodeGen/X86/vector-tzcnt-256.ll b/test/CodeGen/X86/vector-tzcnt-256.ll
index 32f7bcb00c5..8f744f79f85 100644
--- a/test/CodeGen/X86/vector-tzcnt-256.ll
+++ b/test/CodeGen/X86/vector-tzcnt-256.ll
@@ -1117,6 +1117,78 @@ define <32 x i8> @testv32i8u(<32 x i8> %in) {
   ret <32 x i8> %out
 }
 
+define <4 x i64> @foldv4i64() {
+; AVX-LABEL: foldv4i64:
+; AVX:         # BB#0:
+; AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [8,0,64,0]
+; AVX-NEXT:    retq
+  %out = call <4 x i64> @llvm.cttz.v4i64(<4 x i64> <i64 256, i64 -1, i64 0, i64 255>, i1 0)
+  ret <4 x i64> %out
+}
+
+define <4 x i64> @foldv4i64u() {
+; AVX-LABEL: foldv4i64u:
+; AVX:         # BB#0:
+; AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [8,0,64,0]
+; AVX-NEXT:    retq
+  %out = call <4 x i64> @llvm.cttz.v4i64(<4 x i64> <i64 256, i64 -1, i64 0, i64 255>, i1 -1)
+  ret <4 x i64> %out
+}
+
+define <8 x i32> @foldv8i32() {
+; AVX-LABEL: foldv8i32:
+; AVX:         # BB#0:
+; AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [8,0,32,0,16,0,3,3]
+; AVX-NEXT:    retq
+  %out = call <8 x i32> @llvm.cttz.v8i32(<8 x i32> <i32 256, i32 -1, i32 0, i32 255, i32 -65536, i32 7, i32 24, i32 88>, i1 0)
+  ret <8 x i32> %out
+}
+
+define <8 x i32> @foldv8i32u() {
+; AVX-LABEL: foldv8i32u:
+; AVX:         # BB#0:
+; AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [8,0,32,0,16,0,3,3]
+; AVX-NEXT:    retq
+  %out = call <8 x i32> @llvm.cttz.v8i32(<8 x i32> <i32 256, i32 -1, i32 0, i32 255, i32 -65536, i32 7, i32 24, i32 88>, i1 -1)
+  ret <8 x i32> %out
+}
+
+define <16 x i16> @foldv16i16() {
+; AVX-LABEL: foldv16i16:
+; AVX:         # BB#0:
+; AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [8,0,16,0,16,0,3,3,1,1,0,1,2,3,4,5]
+; AVX-NEXT:    retq
+  %out = call <16 x i16> @llvm.cttz.v16i16(<16 x i16> <i16 256, i16 -1, i16 0, i16 255, i16 -65536, i16 7, i16 24, i16 88, i16 -2, i16 254, i16 1, i16 2, i16 4, i16 8, i16 16, i16 32>, i1 0)
+  ret <16 x i16> %out
+}
+
+define <16 x i16> @foldv16i16u() {
+; AVX-LABEL: foldv16i16u:
+; AVX:         # BB#0:
+; AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [8,0,16,0,16,0,3,3,1,1,0,1,2,3,4,5]
+; AVX-NEXT:    retq
+  %out = call <16 x i16> @llvm.cttz.v16i16(<16 x i16> <i16 256, i16 -1, i16 0, i16 255, i16 -65536, i16 7, i16 24, i16 88, i16 -2, i16 254, i16 1, i16 2, i16 4, i16 8, i16 16, i16 32>, i1 -1)
+  ret <16 x i16> %out
+}
+
+define <32 x i8> @foldv32i8() {
+; AVX-LABEL: foldv32i8:
+; AVX:         # BB#0:
+; AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5,6,7,8,8,7,6,5,4,3,2,1,0,0,0,0,0]
+; AVX-NEXT:    retq
+  %out = call <32 x i8> @llvm.cttz.v32i8(<32 x i8> <i8 256, i8 -1, i8 0, i8 255, i8 -65536, i8 7, i8 24, i8 88, i8 -2, i8 254, i8 1, i8 2, i8 4, i8 8, i8 16, i8 32, i8 64, i8 128, i8 256, i8 -256, i8 -128, i8 -64, i8 -32, i8 -16, i8 -8, i8 -4, i8 -2, i8 -1, i8 3, i8 5, i8 7, i8 127>, i1 0)
+  ret <32 x i8> %out
+}
+
+define <32 x i8> @foldv32i8u() {
+; AVX-LABEL: foldv32i8u:
+; AVX:         # BB#0:
+; AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5,6,7,8,8,7,6,5,4,3,2,1,0,0,0,0,0]
+; AVX-NEXT:    retq
+  %out = call <32 x i8> @llvm.cttz.v32i8(<32 x i8> <i8 256, i8 -1, i8 0, i8 255, i8 -65536, i8 7, i8 24, i8 88, i8 -2, i8 254, i8 1, i8 2, i8 4, i8 8, i8 16, i8 32, i8 64, i8 128, i8 256, i8 -256, i8 -128, i8 -64, i8 -32, i8 -16, i8 -8, i8 -4, i8 -2, i8 -1, i8 3, i8 5, i8 7, i8 127>, i1 -1)
+  ret <32 x i8> %out
+}
+
 declare <4 x i64> @llvm.cttz.v4i64(<4 x i64>, i1)
 declare <8 x i32> @llvm.cttz.v8i32(<8 x i32>, i1)
 declare <16 x i16> @llvm.cttz.v16i16(<16 x i16>, i1)
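
Usage sketch (not part of the commit; the function name and element values below are illustrative): with DAGCombiner now accepting constant build vectors and SelectionDAG::getNode folding ISD::CTTZ / ISD::CTTZ_ZERO_UNDEF element-wise over a BUILD_VECTOR, a cttz of a vector constant should fold away entirely, so llc emits only a constant materialization instead of a tzcnt or bit-twiddling sequence:

define <4 x i32> @cttz_fold_sketch() {
  ; cttz(1)=0, cttz(2)=1, cttz(4)=2, cttz(8)=3, so this call should
  ; constant-fold to <i32 0, i32 1, i32 2, i32 3> during DAG combining.
  %r = call <4 x i32> @llvm.cttz.v4i32(<4 x i32> <i32 1, i32 2, i32 4, i32 8>, i1 0)
  ret <4 x i32> %r
}
declare <4 x i32> @llvm.cttz.v4i32(<4 x i32>, i1)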