From: Simon Pilgrim <llvm-dev@redking.me.uk> Date: Sat, 2 May 2015 13:04:07 +0000 (+0000) Subject: [DAGCombiner] Enabled vector float/double -> int constant folding X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=d85813d9a5029542ddbd0e8d533fbb1cde9046f0;p=oota-llvm.git [DAGCombiner] Enabled vector float/double -> int constant folding git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@236387 91177308-0d34-0410-b5e6-96231b3b80d8 --- diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp index 81e4a813367..9319b81d00e 100644 --- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -8549,11 +8549,10 @@ static SDValue FoldIntToFPToInt(SDNode *N, SelectionDAG &DAG) { SDValue DAGCombiner::visitFP_TO_SINT(SDNode *N) { SDValue N0 = N->getOperand(0); - ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); EVT VT = N->getValueType(0); // fold (fp_to_sint c1fp) -> c1 - if (N0CFP) + if (isConstantFPBuildVectorOrConstantFP(N0)) return DAG.getNode(ISD::FP_TO_SINT, SDLoc(N), VT, N0); return FoldIntToFPToInt(N, DAG); @@ -8561,11 +8560,10 @@ SDValue DAGCombiner::visitFP_TO_SINT(SDNode *N) { SDValue DAGCombiner::visitFP_TO_UINT(SDNode *N) { SDValue N0 = N->getOperand(0); - ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); EVT VT = N->getValueType(0); // fold (fp_to_uint c1fp) -> c1 - if (N0CFP) + if (isConstantFPBuildVectorOrConstantFP(N0)) return DAG.getNode(ISD::FP_TO_UINT, SDLoc(N), VT, N0); return FoldIntToFPToInt(N, DAG); diff --git a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp index 9d403a6948c..2afde5fbff6 100644 --- a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp +++ b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp @@ -2864,6 +2864,8 @@ SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, case ISD::FTRUNC: case ISD::FFLOOR: case ISD::FP_EXTEND: + case ISD::FP_TO_SINT: + case ISD::FP_TO_UINT: case ISD::TRUNCATE: case ISD::UINT_TO_FP: case ISD::SINT_TO_FP: { diff --git 
a/test/CodeGen/X86/vec_fp_to_int.ll b/test/CodeGen/X86/vec_fp_to_int.ll index 9f36167301d..9f1c7afa295 100644 --- a/test/CodeGen/X86/vec_fp_to_int.ll +++ b/test/CodeGen/X86/vec_fp_to_int.ll @@ -745,3 +745,211 @@ define <4 x i64> @fptoui_8vf32_i64(<8 x float> %a) { %cvt = fptoui <4 x float> %shuf to <4 x i64> ret <4 x i64> %cvt } + +; +; Constant Folding +; + +define <2 x i64> @fptosi_2vf64c() { +; SSE2-LABEL: fptosi_2vf64c: +; SSE2: # BB#0: +; SSE2-NEXT: movaps {{.*#+}} xmm0 = [1,18446744073709551615] +; SSE2-NEXT: retq +; +; AVX-LABEL: fptosi_2vf64c: +; AVX: # BB#0: +; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [1,18446744073709551615] +; AVX-NEXT: retq + %cvt = fptosi <2 x double> <double 1.0, double -1.0> to <2 x i64> + ret <2 x i64> %cvt +} + +define <4 x i32> @fptosi_2vf64c_i32() { +; SSE2-LABEL: fptosi_2vf64c_i32: +; SSE2: # BB#0: +; SSE2-NEXT: movaps {{.*#+}} xmm0 = <4294967295,1,u,u> +; SSE2-NEXT: retq +; +; AVX-LABEL: fptosi_2vf64c_i32: +; AVX: # BB#0: +; AVX-NEXT: vmovaps {{.*#+}} xmm0 = <4294967295,1,u,u> +; AVX-NEXT: retq + %cvt = fptosi <2 x double> <double -1.0, double 1.0> to <2 x i32> + %ext = shufflevector <2 x i32> %cvt, <2 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + ret <4 x i32> %ext +} + +define <4 x i64> @fptosi_4vf64c() { +; SSE2-LABEL: fptosi_4vf64c: +; SSE2: # BB#0: +; SSE2-NEXT: movaps {{.*#+}} xmm0 = [1,18446744073709551615] +; SSE2-NEXT: movaps {{.*#+}} xmm1 = [2,18446744073709551613] +; SSE2-NEXT: retq +; +; AVX-LABEL: fptosi_4vf64c: +; AVX: # BB#0: +; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [1,18446744073709551615,2,18446744073709551613] +; AVX-NEXT: retq + %cvt = fptosi <4 x double> <double 1.0, double -1.0, double 2.0, double -3.0> to <4 x i64> + ret <4 x i64> %cvt +} + +define <4 x i32> @fptosi_4vf64c_i32() { +; SSE2-LABEL: fptosi_4vf64c_i32: +; SSE2: # BB#0: +; SSE2-NEXT: movaps {{.*#+}} xmm0 = [4294967295,1,4294967294,3] +; SSE2-NEXT: retq +; +; AVX-LABEL: fptosi_4vf64c_i32: +; AVX: # BB#0: +; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [4294967295,1,4294967294,3] +; AVX-NEXT: retq + %cvt = fptosi <4 x double> <double -1.0, double 1.0, double -2.0, double 3.0> to <4 x i32> + ret <4 x i32> %cvt +} + +define <2 x i64> 
@fptoui_2vf64c() { +; SSE2-LABEL: fptoui_2vf64c: +; SSE2: # BB#0: +; SSE2-NEXT: movaps {{.*#+}} xmm0 = [2,4] +; SSE2-NEXT: retq +; +; AVX-LABEL: fptoui_2vf64c: +; AVX: # BB#0: +; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [2,4] +; AVX-NEXT: retq + %cvt = fptoui <2 x double> <double 2.0, double 4.0> to <2 x i64> + ret <2 x i64> %cvt +} + +define <4 x i32> @fptoui_2vf64c_i32(<2 x double> %a) { +; SSE2-LABEL: fptoui_2vf64c_i32: +; SSE2: # BB#0: +; SSE2-NEXT: movaps {{.*#+}} xmm0 = <2,4,u,u> +; SSE2-NEXT: retq +; +; AVX-LABEL: fptoui_2vf64c_i32: +; AVX: # BB#0: +; AVX-NEXT: vmovaps {{.*#+}} xmm0 = <2,4,u,u> +; AVX-NEXT: retq + %cvt = fptoui <2 x double> <double 2.0, double 4.0> to <2 x i32> + %ext = shufflevector <2 x i32> %cvt, <2 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + ret <4 x i32> %ext +} + +define <4 x i64> @fptoui_4vf64c(<4 x double> %a) { +; SSE2-LABEL: fptoui_4vf64c: +; SSE2: # BB#0: +; SSE2-NEXT: movaps {{.*#+}} xmm0 = [2,4] +; SSE2-NEXT: movaps {{.*#+}} xmm1 = [6,8] +; SSE2-NEXT: retq +; +; AVX-LABEL: fptoui_4vf64c: +; AVX: # BB#0: +; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [2,4,6,8] +; AVX-NEXT: retq + %cvt = fptoui <4 x double> <double 2.0, double 4.0, double 6.0, double 8.0> to <4 x i64> + ret <4 x i64> %cvt +} + +define <4 x i32> @fptoui_4vf64c_i32(<4 x double> %a) { +; SSE2-LABEL: fptoui_4vf64c_i32: +; SSE2: # BB#0: +; SSE2-NEXT: movaps {{.*#+}} xmm0 = [2,4,6,8] +; SSE2-NEXT: retq +; +; AVX-LABEL: fptoui_4vf64c_i32: +; AVX: # BB#0: +; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [2,4,6,8] +; AVX-NEXT: retq + %cvt = fptoui <4 x double> <double 2.0, double 4.0, double 6.0, double 8.0> to <4 x i32> + ret <4 x i32> %cvt +} + +define <4 x i32> @fptosi_4vf32c() { +; SSE2-LABEL: fptosi_4vf32c: +; SSE2: # BB#0: +; SSE2-NEXT: movaps {{.*#+}} xmm0 = [1,4294967295,2,3] +; SSE2-NEXT: retq +; +; AVX-LABEL: fptosi_4vf32c: +; AVX: # BB#0: +; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [1,4294967295,2,3] +; AVX-NEXT: retq + %cvt = fptosi <4 x float> <float 1.0, float -1.0, float 2.0, float 3.0> to <4 x i32> + ret <4 x i32> %cvt +} + +define <4 x i64> @fptosi_4vf32c_i64() { +; SSE2-LABEL: fptosi_4vf32c_i64: +; SSE2: # BB#0: +; SSE2-NEXT: movaps {{.*#+}} xmm0 = [1,18446744073709551615] +; SSE2-NEXT: movaps 
{{.*#+}} xmm1 = [2,3] +; SSE2-NEXT: retq +; +; AVX-LABEL: fptosi_4vf32c_i64: +; AVX: # BB#0: +; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [1,18446744073709551615,2,3] +; AVX-NEXT: retq + %cvt = fptosi <4 x float> <float 1.0, float -1.0, float 2.0, float 3.0> to <4 x i64> + ret <4 x i64> %cvt +} + +define <8 x i32> @fptosi_8vf32c(<8 x float> %a) { +; SSE2-LABEL: fptosi_8vf32c: +; SSE2: # BB#0: +; SSE2-NEXT: movaps {{.*#+}} xmm0 = [1,4294967295,2,3] +; SSE2-NEXT: movaps {{.*#+}} xmm1 = [6,4294967288,2,4294967295] +; SSE2-NEXT: retq +; +; AVX-LABEL: fptosi_8vf32c: +; AVX: # BB#0: +; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [1,4294967295,2,3,6,4294967288,2,4294967295] +; AVX-NEXT: retq + %cvt = fptosi <8 x float> <float 1.0, float -1.0, float 2.0, float 3.0, float 6.0, float -8.0, float 2.0, float -1.0> to <8 x i32> + ret <8 x i32> %cvt +} + +define <4 x i32> @fptoui_4vf32c(<4 x float> %a) { +; SSE2-LABEL: fptoui_4vf32c: +; SSE2: # BB#0: +; SSE2-NEXT: movaps {{.*#+}} xmm0 = [1,2,4,6] +; SSE2-NEXT: retq +; +; AVX-LABEL: fptoui_4vf32c: +; AVX: # BB#0: +; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [1,2,4,6] +; AVX-NEXT: retq + %cvt = fptoui <4 x float> <float 1.0, float 2.0, float 4.0, float 6.0> to <4 x i32> + ret <4 x i32> %cvt +} + +define <4 x i64> @fptoui_4vf32c_i64() { +; SSE2-LABEL: fptoui_4vf32c_i64: +; SSE2: # BB#0: +; SSE2-NEXT: movaps {{.*#+}} xmm0 = [1,2] +; SSE2-NEXT: movaps {{.*#+}} xmm1 = [4,8] +; SSE2-NEXT: retq +; +; AVX-LABEL: fptoui_4vf32c_i64: +; AVX: # BB#0: +; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [1,2,4,8] +; AVX-NEXT: retq + %cvt = fptoui <4 x float> <float 1.0, float 2.0, float 4.0, float 8.0> to <4 x i64> + ret <4 x i64> %cvt +} + +define <8 x i32> @fptoui_8vf32c(<8 x float> %a) { +; SSE2-LABEL: fptoui_8vf32c: +; SSE2: # BB#0: +; SSE2-NEXT: movaps {{.*#+}} xmm0 = [1,2,4,6] +; SSE2-NEXT: movaps {{.*#+}} xmm1 = [8,6,4,1] +; SSE2-NEXT: retq +; +; AVX-LABEL: fptoui_8vf32c: +; AVX: # BB#0: +; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [1,2,4,6,8,6,4,1] +; AVX-NEXT: retq + %cvt = fptoui <8 x float> <float 1.0, float 2.0, float 4.0, float 6.0, float 8.0, float 6.0, float 4.0, float 1.0> to <8 x i32> + ret <8 x i32> %cvt +}