From b40562c0ecc0987855fb8ba045e9f538179e5895 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Tue, 12 Aug 2014 17:31:17 +0000 Subject: [PATCH] R600: Use i24 optimized path for SREM v2: add tests rename LowerSDIV24 to LowerSDIVREM24 handle the rem part in this function Signed-off-by: Jan Vesely git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@215460 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/R600/AMDGPUISelLowering.cpp | 34 +++-- lib/Target/R600/AMDGPUISelLowering.h | 2 +- test/CodeGen/R600/{sdiv24.ll => sdivrem24.ll} | 118 ++++++++++++++++++ 3 files changed, 146 insertions(+), 8 deletions(-) rename test/CodeGen/R600/{sdiv24.ll => sdivrem24.ll} (51%) diff --git a/lib/Target/R600/AMDGPUISelLowering.cpp b/lib/Target/R600/AMDGPUISelLowering.cpp index 89c171b3a9a..396f8e52e01 100644 --- a/lib/Target/R600/AMDGPUISelLowering.cpp +++ b/lib/Target/R600/AMDGPUISelLowering.cpp @@ -250,7 +250,7 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM) : const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 }; for (MVT VT : ScalarIntVTs) { setOperationAction(ISD::SREM, VT, Expand); - setOperationAction(ISD::SDIV, VT, Custom); + setOperationAction(ISD::SDIV, VT, Expand); // GPU does not have divrem function for signed or unsigned. setOperationAction(ISD::SDIVREM, VT, Custom); @@ -1390,7 +1390,7 @@ SDValue AMDGPUTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const { // This is a shortcut for integer division because we have fast i32<->f32 // conversions, and fast f32 reciprocal instructions. The fractional part of a // float is enough to accurately represent up to a 24-bit integer. -SDValue AMDGPUTargetLowering::LowerSDIV24(SDValue Op, SelectionDAG &DAG) const { +SDValue AMDGPUTargetLowering::LowerSDIVREM24(SDValue Op, SelectionDAG &DAG) const { SDLoc DL(Op); EVT VT = Op.getValueType(); SDValue LHS = Op.getOperand(0); @@ -1463,7 +1463,17 @@ SDValue AMDGPUTargetLowering::LowerSDIV24(SDValue Op, SelectionDAG &DAG) const { // dst = iq + jq; iq = DAG.getSExtOrTrunc(iq, DL, VT); - return DAG.getNode(ISD::ADD, DL, VT, iq, jq); + + SDValue Div = DAG.getNode(ISD::ADD, DL, VT, iq, jq); + + SDValue Rem = DAG.getNode(ISD::MUL, DL, VT, Div, RHS); + Rem = DAG.getNode(ISD::SUB, DL, VT, LHS, Rem); + + SDValue Res[2] = { + Div, + Rem + }; + return DAG.getMergeValues(Res, DL); } SDValue AMDGPUTargetLowering::LowerSDIV32(SDValue Op, SelectionDAG &DAG) const { @@ -1544,7 +1554,7 @@ SDValue AMDGPUTargetLowering::LowerSDIV(SDValue Op, SelectionDAG &DAG) const { // TODO: We technically could do this for i64, but shouldn't that just be // handled by something generally reducing 64-bit division on 32-bit // values to 32-bit? - return LowerSDIV24(Op, DAG); +// return LowerSDIV24(Op, DAG); } return LowerSDIV32(Op, DAG); @@ -1740,12 +1750,22 @@ SDValue AMDGPUTargetLowering::LowerSDIVREM(SDValue Op, SDLoc DL(Op); EVT VT = Op.getValueType(); - SDValue Zero = DAG.getConstant(0, VT); - SDValue NegOne = DAG.getConstant(-1, VT); - SDValue LHS = Op.getOperand(0); SDValue RHS = Op.getOperand(1); + if (VT == MVT::i32) { + if (DAG.ComputeNumSignBits(Op.getOperand(0)) > 8 && + DAG.ComputeNumSignBits(Op.getOperand(1)) > 8) { + // TODO: We technically could do this for i64, but shouldn't that just be + // handled by something generally reducing 64-bit division on 32-bit + // values to 32-bit? 
+      return LowerSDIVREM24(Op, DAG);
+    }
+  }
+
+  SDValue Zero = DAG.getConstant(0, VT);
+  SDValue NegOne = DAG.getConstant(-1, VT);
+
   SDValue LHSign = DAG.getSelectCC(DL, LHS, Zero, NegOne, Zero, ISD::SETLT);
   SDValue RHSign = DAG.getSelectCC(DL, RHS, Zero, NegOne, Zero, ISD::SETLT);
   SDValue DSign = DAG.getNode(ISD::XOR, DL, VT, LHSign, RHSign);
diff --git a/lib/Target/R600/AMDGPUISelLowering.h b/lib/Target/R600/AMDGPUISelLowering.h
index 769ea97830d..58bb4818a02 100644
--- a/lib/Target/R600/AMDGPUISelLowering.h
+++ b/lib/Target/R600/AMDGPUISelLowering.h
@@ -44,7 +44,6 @@ private:
   /// \returns The resulting chain.
   SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG) const;
-  SDValue LowerSDIV24(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerSDIV32(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerSDIV64(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerSREM(SDValue Op, SelectionDAG &DAG) const;
@@ -89,6 +88,7 @@ protected:
   SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerSDIVREM(SDValue Op, SelectionDAG &DAG) const;
+  SDValue LowerSDIVREM24(SDValue Op, SelectionDAG &DAG) const;
   bool isHWTrueValue(SDValue Op) const;
   bool isHWFalseValue(SDValue Op) const;
diff --git a/test/CodeGen/R600/sdiv24.ll b/test/CodeGen/R600/sdivrem24.ll
similarity index 51%
rename from test/CodeGen/R600/sdiv24.ll
rename to test/CodeGen/R600/sdivrem24.ll
index 84c9ecbbfda..55f6d2c09ee 100644
--- a/test/CodeGen/R600/sdiv24.ll
+++ b/test/CodeGen/R600/sdivrem24.ll
@@ -118,3 +118,121 @@ define void @test_no_sdiv24_i32_2(i32 addrspace(1)* %out, i32 addrspace(1)* %in)
   store i32 %result, i32 addrspace(1)* %out, align 4
   ret void
 }
+
+; FUNC-LABEL: @srem24_i8
+; SI: V_CVT_F32_I32
+; SI: V_CVT_F32_I32
+; SI: V_RCP_F32
+; SI: V_CVT_I32_F32
+
+; EG: INT_TO_FLT
+; EG-DAG: INT_TO_FLT
+; EG-DAG: RECIP_IEEE
+; EG: FLT_TO_INT
+define void @srem24_i8(i8 addrspace(1)* %out, i8 addrspace(1)* %in) {
+  %den_ptr = getelementptr i8 addrspace(1)* %in, i8 1
+  %num = load i8 addrspace(1) * %in
+  %den = load i8 addrspace(1) * %den_ptr
+  %result = srem i8 %num, %den
+  store i8 %result, i8 addrspace(1)* %out
+  ret void
+}
+
+; FUNC-LABEL: @srem24_i16
+; SI: V_CVT_F32_I32
+; SI: V_CVT_F32_I32
+; SI: V_RCP_F32
+; SI: V_CVT_I32_F32
+
+; EG: INT_TO_FLT
+; EG-DAG: INT_TO_FLT
+; EG-DAG: RECIP_IEEE
+; EG: FLT_TO_INT
+define void @srem24_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in) {
+  %den_ptr = getelementptr i16 addrspace(1)* %in, i16 1
+  %num = load i16 addrspace(1) * %in, align 2
+  %den = load i16 addrspace(1) * %den_ptr, align 2
+  %result = srem i16 %num, %den
+  store i16 %result, i16 addrspace(1)* %out, align 2
+  ret void
+}
+
+; FUNC-LABEL: @srem24_i32
+; SI: V_CVT_F32_I32
+; SI: V_CVT_F32_I32
+; SI: V_RCP_F32
+; SI: V_CVT_I32_F32
+
+; EG: INT_TO_FLT
+; EG-DAG: INT_TO_FLT
+; EG-DAG: RECIP_IEEE
+; EG: FLT_TO_INT
+define void @srem24_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+  %den_ptr = getelementptr i32 addrspace(1)* %in, i32 1
+  %num = load i32 addrspace(1) * %in, align 4
+  %den = load i32 addrspace(1) * %den_ptr, align 4
+  %num.i24.0 = shl i32 %num, 8
+  %den.i24.0 = shl i32 %den, 8
+  %num.i24 = ashr i32 %num.i24.0, 8
+  %den.i24 = ashr i32 %den.i24.0, 8
+  %result = srem i32 %num.i24, %den.i24
+  store i32 %result, i32 addrspace(1)* %out, align 4
+  ret void
+}
+
+; FUNC-LABEL: @srem25_i32
+; SI-NOT: V_CVT_F32_I32
+; SI-NOT: V_RCP_F32
+
+; EG-NOT: INT_TO_FLT
+; EG-NOT: RECIP_IEEE
+define void @srem25_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+  %den_ptr = getelementptr i32 addrspace(1)* %in, i32 1
+  %num = load i32 addrspace(1) * %in, align 4
+  %den = load i32 addrspace(1) * %den_ptr, align 4
+  %num.i24.0 = shl i32 %num, 7
+  %den.i24.0 = shl i32 %den, 7
+  %num.i24 = ashr i32 %num.i24.0, 7
+  %den.i24 = ashr i32 %den.i24.0, 7
+  %result = srem i32 %num.i24, %den.i24
+  store i32 %result, i32 addrspace(1)* %out, align 4
+  ret void
+}
+
+; FUNC-LABEL: @test_no_srem24_i32_1
+; SI-NOT: V_CVT_F32_I32
+; SI-NOT: V_RCP_F32
+
+; EG-NOT: INT_TO_FLT
+; EG-NOT: RECIP_IEEE
+define void @test_no_srem24_i32_1(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+  %den_ptr = getelementptr i32 addrspace(1)* %in, i32 1
+  %num = load i32 addrspace(1) * %in, align 4
+  %den = load i32 addrspace(1) * %den_ptr, align 4
+  %num.i24.0 = shl i32 %num, 8
+  %den.i24.0 = shl i32 %den, 7
+  %num.i24 = ashr i32 %num.i24.0, 8
+  %den.i24 = ashr i32 %den.i24.0, 7
+  %result = srem i32 %num.i24, %den.i24
+  store i32 %result, i32 addrspace(1)* %out, align 4
+  ret void
+}
+
+; FUNC-LABEL: @test_no_srem24_i32_2
+; SI-NOT: V_CVT_F32_I32
+; SI-NOT: V_RCP_F32
+
+; EG-NOT: INT_TO_FLT
+; EG-NOT: RECIP_IEEE
+define void @test_no_srem24_i32_2(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+  %den_ptr = getelementptr i32 addrspace(1)* %in, i32 1
+  %num = load i32 addrspace(1) * %in, align 4
+  %den = load i32 addrspace(1) * %den_ptr, align 4
+  %num.i24.0 = shl i32 %num, 7
+  %den.i24.0 = shl i32 %den, 8
+  %num.i24 = ashr i32 %num.i24.0, 7
+  %den.i24 = ashr i32 %den.i24.0, 8
+  %result = srem i32 %num.i24, %den.i24
+  store i32 %result, i32 addrspace(1)* %out, align 4
+  ret void
+}
-- 
2.34.1
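
For readers unfamiliar with the i24 fast path this patch routes SREM through: the scalar C++ below is a minimal sketch of what LowerSDIVREM24 models, assuming both operands fit in 24 signed bits (ComputeNumSignBits > 8) and the divisor is nonzero. The helper name sdivrem24 is made up here, host float math stands in for the V_RCP_F32 / truncate / convert sequence the SI and EG checks look for, and an arithmetic right shift is assumed; it is not the in-tree lowering.

    #include <cmath>
    #include <cstdint>
    #include <utility>

    // Illustrative scalar model of the 24-bit signed divide/remainder path.
    // Assumes |a| and |b| fit in 24 signed bits and b != 0.
    static std::pair<int32_t, int32_t> sdivrem24(int32_t a, int32_t b) {
      // jq is +1 when the quotient is non-negative and -1 otherwise; it is only
      // applied if the float estimate turns out to be one unit short in magnitude.
      int32_t jq = ((a ^ b) >> 30) | 1;

      float fa = static_cast<float>(a);  // fast i32 -> f32 conversion
      float fb = static_cast<float>(b);

      // Estimate the quotient with the f32 reciprocal and truncate toward zero.
      float fq = std::trunc(fa * (1.0f / fb));

      // fr = fa - fq * fb, the float remainder of the estimate.
      float fr = std::fma(-fq, fb, fa);

      int32_t iq = static_cast<int32_t>(fq);

      // If |fr| >= |fb|, the truncated estimate is one short; fix it up with jq.
      if (std::fabs(fr) >= std::fabs(fb))
        iq += jq;

      // The part this patch adds: derive the remainder from the corrected
      // quotient, so SREM reuses the same 24-bit division.
      int32_t rem = a - iq * b;
      return {iq, rem};  // e.g. sdivrem24(-7, 2) == {-3, -1}
    }

The |fr| >= |fb| test plays the role of the cv/jq fixup in the DAG code: the reciprocal estimate can leave the truncated quotient one short in magnitude, jq restores it with the sign of the true quotient, and the remainder is then recomputed as LHS - Div * RHS, which is what the new getMergeValues result returns.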