From: Matt Arsenault Date: Fri, 3 Oct 2014 23:54:41 +0000 (+0000) Subject: R600: Custom lower [s|u]int_to_fp for i64 -> f64 X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=ad2f641c33bf41fb7cfec85e9a8e991ddeb110f9;p=oota-llvm.git R600: Custom lower [s|u]int_to_fp for i64 -> f64 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@219037 91177308-0d34-0410-b5e6-96231b3b80d8 --- diff --git a/lib/Target/R600/AMDGPUISelLowering.cpp b/lib/Target/R600/AMDGPUISelLowering.cpp index 9a5e39e8e30..c4ae6a9b91d 100644 --- a/lib/Target/R600/AMDGPUISelLowering.cpp +++ b/lib/Target/R600/AMDGPUISelLowering.cpp @@ -285,6 +285,7 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM) : setOperationAction(ISD::UDIV, MVT::i32, Expand); setOperationAction(ISD::UREM, MVT::i32, Expand); setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom); + setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom); setOperationAction(ISD::SELECT_CC, MVT::i64, Expand); if (!Subtarget->hasFFBH()) @@ -555,6 +556,7 @@ SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op, case ISD::FRINT: return LowerFRINT(Op, DAG); case ISD::FNEARBYINT: return LowerFNEARBYINT(Op, DAG); case ISD::FFLOOR: return LowerFFLOOR(Op, DAG); + case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG); case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG); } return Op; @@ -1805,13 +1807,43 @@ SDValue AMDGPUTargetLowering::LowerFFLOOR(SDValue Op, SelectionDAG &DAG) const { return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add); } +SDValue AMDGPUTargetLowering::LowerINT_TO_FP64(SDValue Op, SelectionDAG &DAG, + bool Signed) const { + SDLoc SL(Op); + SDValue Src = Op.getOperand(0); + + SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src); + + SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC, + DAG.getConstant(0, MVT::i32)); + SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC, + DAG.getConstant(1, MVT::i32)); + + SDValue CvtHi = DAG.getNode(Signed ? 
ISD::SINT_TO_FP : ISD::UINT_TO_FP, + SL, MVT::f64, Hi); + + SDValue CvtLo = DAG.getNode(ISD::UINT_TO_FP, SL, MVT::f64, Lo); + + SDValue LdExp = DAG.getNode(AMDGPUISD::LDEXP, SL, MVT::f64, CvtHi, + DAG.getConstant(32, MVT::i32)); + + return DAG.getNode(ISD::FADD, SL, MVT::f64, LdExp, CvtLo); +} + SDValue AMDGPUTargetLowering::LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const { SDValue S0 = Op.getOperand(0); - SDLoc DL(Op); - if (Op.getValueType() != MVT::f32 || S0.getValueType() != MVT::i64) + if (S0.getValueType() != MVT::i64) return SDValue(); + EVT DestVT = Op.getValueType(); + if (DestVT == MVT::f64) + return LowerINT_TO_FP64(Op, DAG, false); + + assert(DestVT == MVT::f32); + + SDLoc DL(Op); + // f32 uint_to_fp i64 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, S0, DAG.getConstant(0, MVT::i32)); @@ -1824,6 +1856,15 @@ SDValue AMDGPUTargetLowering::LowerUINT_TO_FP(SDValue Op, return DAG.getNode(ISD::FADD, DL, MVT::f32, FloatLo, FloatHi); } +SDValue AMDGPUTargetLowering::LowerSINT_TO_FP(SDValue Op, + SelectionDAG &DAG) const { + SDValue Src = Op.getOperand(0); + if (Src.getValueType() == MVT::i64 && Op.getValueType() == MVT::f64) + return LowerINT_TO_FP64(Op, DAG, true); + + return SDValue(); +} + SDValue AMDGPUTargetLowering::ExpandSIGN_EXTEND_INREG(SDValue Op, unsigned BitsDiff, SelectionDAG &DAG) const { diff --git a/lib/Target/R600/AMDGPUISelLowering.h b/lib/Target/R600/AMDGPUISelLowering.h index 911576b0afe..84cb52282f8 100644 --- a/lib/Target/R600/AMDGPUISelLowering.h +++ b/lib/Target/R600/AMDGPUISelLowering.h @@ -51,7 +51,9 @@ private: SDValue LowerFNEARBYINT(SDValue Op, SelectionDAG &DAG) const; SDValue LowerFFLOOR(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerINT_TO_FP64(SDValue Op, SelectionDAG &DAG, bool Signed) const; SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) const; SDValue ExpandSIGN_EXTEND_INREG(SDValue Op, unsigned BitsDiff, diff --git a/lib/Target/R600/SIISelLowering.h b/lib/Target/R600/SIISelLowering.h index 9cf4dbcb2fd..9e50f0b2d69 100644 --- a/lib/Target/R600/SIISelLowering.h +++ b/lib/Target/R600/SIISelLowering.h @@ -37,6 +37,7 @@ class SITargetLowering : public AMDGPUTargetLowering { SDValue LowerFDIV32(SDValue Op, SelectionDAG &DAG) const; SDValue LowerFDIV64(SDValue Op, SelectionDAG &DAG) const; SDValue LowerFDIV(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG, bool Signed) const; SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const; SDValue LowerTrig(SDValue Op, SelectionDAG &DAG) const; SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const; diff --git a/test/CodeGen/R600/sint_to_fp.f64.ll b/test/CodeGen/R600/sint_to_fp.f64.ll new file mode 100644 index 00000000000..d2b3f12c1f3 --- /dev/null +++ b/test/CodeGen/R600/sint_to_fp.f64.ll @@ -0,0 +1,60 @@ +; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s + +declare i32 @llvm.r600.read.tidig.x() nounwind readnone + +; SI-LABEL: {{^}}sint_to_fp_i32_to_f64 +; SI: V_CVT_F64_I32_e32 +define void @sint_to_fp_i32_to_f64(double addrspace(1)* %out, i32 %in) { + %result = sitofp i32 %in to double + store double %result, double addrspace(1)* %out + ret void +} + +; SI-LABEL: {{^}}sint_to_fp_i1_f64: +; SI: V_CMP_EQ_I32_e64 [[CMP:s\[[0-9]+:[0-9]\]]], +; FIXME: We should the VGPR sources for V_CNDMASK are copied from SGPRs, +; we should be able to fold the SGPRs into the V_CNDMASK instructions. 
+; SI: V_CNDMASK_B32_e64 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[CMP]] +; SI: V_CNDMASK_B32_e64 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[CMP]] +; SI: BUFFER_STORE_DWORDX2 +; SI: S_ENDPGM +define void @sint_to_fp_i1_f64(double addrspace(1)* %out, i32 %in) { + %cmp = icmp eq i32 %in, 0 + %fp = sitofp i1 %cmp to double + store double %fp, double addrspace(1)* %out, align 4 + ret void +} + +; SI-LABEL: {{^}}sint_to_fp_i1_f64_load: +; SI: V_CNDMASK_B32_e64 [[IRESULT:v[0-9]]], 0, -1 +; SI-NEXT: V_CVT_F64_I32_e32 [[RESULT:v\[[0-9]+:[0-9]\]]], [[IRESULT]] +; SI: BUFFER_STORE_DWORDX2 [[RESULT]] +; SI: S_ENDPGM +define void @sint_to_fp_i1_f64_load(double addrspace(1)* %out, i1 %in) { + %fp = sitofp i1 %in to double + store double %fp, double addrspace(1)* %out, align 8 + ret void +} + +; SI-LABEL: @s_sint_to_fp_i64_to_f64 +define void @s_sint_to_fp_i64_to_f64(double addrspace(1)* %out, i64 %in) { + %result = sitofp i64 %in to double + store double %result, double addrspace(1)* %out + ret void +} + +; SI-LABEL: @v_sint_to_fp_i64_to_f64 +; SI: BUFFER_LOAD_DWORDX2 v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}} +; SI-DAG: V_CVT_F64_U32_e32 [[LO_CONV:v\[[0-9]+:[0-9]+\]]], v[[LO]] +; SI-DAG: V_CVT_F64_I32_e32 [[HI_CONV:v\[[0-9]+:[0-9]+\]]], v[[HI]] +; SI: V_LDEXP_F64 [[LDEXP:v\[[0-9]+:[0-9]+\]]], [[HI_CONV]], 32 +; SI: V_ADD_F64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[LDEXP]], [[LO_CONV]] +; SI: BUFFER_STORE_DWORDX2 [[RESULT]] +define void @v_sint_to_fp_i64_to_f64(double addrspace(1)* %out, i64 addrspace(1)* %in) { + %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone + %gep = getelementptr i64 addrspace(1)* %in, i32 %tid + %val = load i64 addrspace(1)* %gep, align 8 + %result = sitofp i64 %val to double + store double %result, double addrspace(1)* %out + ret void +} diff --git a/test/CodeGen/R600/sint_to_fp64.ll b/test/CodeGen/R600/sint_to_fp64.ll deleted file mode 100644 index 43889b616da..00000000000 --- a/test/CodeGen/R600/sint_to_fp64.ll +++ /dev/null @@ -1,35 +0,0 @@ -; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s - -; SI: {{^}}sint_to_fp64: -; SI: V_CVT_F64_I32_e32 -define void @sint_to_fp64(double addrspace(1)* %out, i32 %in) { - %result = sitofp i32 %in to double - store double %result, double addrspace(1)* %out - ret void -} - -; SI-LABEL: {{^}}sint_to_fp_i1_f64: -; SI: V_CMP_EQ_I32_e64 [[CMP:s\[[0-9]+:[0-9]\]]], -; FIXME: We should the VGPR sources for V_CNDMASK are copied from SGPRs, -; we should be able to fold the SGPRs into the V_CNDMASK instructions. 
-; SI: V_CNDMASK_B32_e64 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[CMP]] -; SI: V_CNDMASK_B32_e64 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[CMP]] -; SI: BUFFER_STORE_DWORDX2 -; SI: S_ENDPGM -define void @sint_to_fp_i1_f64(double addrspace(1)* %out, i32 %in) { - %cmp = icmp eq i32 %in, 0 - %fp = sitofp i1 %cmp to double - store double %fp, double addrspace(1)* %out, align 4 - ret void -} - -; SI-LABEL: {{^}}sint_to_fp_i1_f64_load: -; SI: V_CNDMASK_B32_e64 [[IRESULT:v[0-9]]], 0, -1 -; SI-NEXT: V_CVT_F64_I32_e32 [[RESULT:v\[[0-9]+:[0-9]\]]], [[IRESULT]] -; SI: BUFFER_STORE_DWORDX2 [[RESULT]] -; SI: S_ENDPGM -define void @sint_to_fp_i1_f64_load(double addrspace(1)* %out, i1 %in) { - %fp = sitofp i1 %in to double - store double %fp, double addrspace(1)* %out, align 8 - ret void -} diff --git a/test/CodeGen/R600/uint_to_fp.f64.ll b/test/CodeGen/R600/uint_to_fp.f64.ll index 19dbbee8354..2e926d6999a 100644 --- a/test/CodeGen/R600/uint_to_fp.f64.ll +++ b/test/CodeGen/R600/uint_to_fp.f64.ll @@ -1,6 +1,8 @@ ; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s -; SI-LABEL: {{^}}uint_to_fp_f64_i32: +declare i32 @llvm.r600.read.tidig.x() nounwind readnone + +; SI-LABEL: {{$}}uint_to_fp_f64_i32 ; SI: V_CVT_F64_U32_e32 ; SI: S_ENDPGM define void @uint_to_fp_f64_i32(double addrspace(1)* %out, i32 %in) { @@ -34,3 +36,40 @@ define void @uint_to_fp_i1_f64_load(double addrspace(1)* %out, i1 %in) { store double %fp, double addrspace(1)* %out, align 8 ret void } + +; SI-LABEL: {{$}}v_uint_to_fp_i64_to_f64 +; SI: BUFFER_LOAD_DWORDX2 v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}} +; SI-DAG: V_CVT_F64_U32_e32 [[LO_CONV:v\[[0-9]+:[0-9]+\]]], v[[LO]] +; SI-DAG: V_CVT_F64_U32_e32 [[HI_CONV:v\[[0-9]+:[0-9]+\]]], v[[HI]] +; SI: V_LDEXP_F64 [[LDEXP:v\[[0-9]+:[0-9]+\]]], [[HI_CONV]], 32 +; SI: V_ADD_F64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[LDEXP]], [[LO_CONV]] +; SI: BUFFER_STORE_DWORDX2 [[RESULT]] +define void @v_uint_to_fp_i64_to_f64(double addrspace(1)* %out, i64 addrspace(1)* %in) { + %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone + %gep = getelementptr i64 addrspace(1)* %in, i32 %tid + %val = load i64 addrspace(1)* %gep, align 8 + %result = uitofp i64 %val to double + store double %result, double addrspace(1)* %out + ret void +} + +; SI-LABEL: {{$}}s_uint_to_fp_f64_i64 +define void @s_uint_to_fp_f64_i64(double addrspace(1)* %out, i64 %in) { + %cast = uitofp i64 %in to double + store double %cast, double addrspace(1)* %out, align 8 + ret void +} + +; SI-LABEL: {{$}}s_uint_to_fp_v2f64_v2i64 +define void @s_uint_to_fp_v2f64_v2i64(<2 x double> addrspace(1)* %out, <2 x i64> %in) { + %cast = uitofp <2 x i64> %in to <2 x double> + store <2 x double> %cast, <2 x double> addrspace(1)* %out, align 16 + ret void +} + +; SI-LABEL: {{$}}s_uint_to_fp_v4f64_v4i64 +define void @s_uint_to_fp_v4f64_v4i64(<4 x double> addrspace(1)* %out, <4 x i64> %in) { + %cast = uitofp <4 x i64> %in to <4 x double> + store <4 x double> %cast, <4 x double> addrspace(1)* %out, align 16 + ret void +}
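
Aside (not part of the patch): the new LowerINT_TO_FP64 helper above splits the 64-bit source into its two 32-bit halves, converts the high half with the signed or unsigned variant, scales that result by 2^32 via AMDGPUISD::LDEXP, and adds the unsigned conversion of the low half. The standalone host-side C++ sketch below illustrates the same identity in plain arithmetic; the helper name Int64ToF64 and the sample values are invented for illustration, and nothing here is SelectionDAG or target code.

// Standalone sketch of the identity the lowering relies on:
//   value == hi * 2^32 + lo, with hi converted signed or unsigned
//   and lo always converted unsigned.
#include <cmath>
#include <cstdint>
#include <cstdio>

static double Int64ToF64(uint64_t Bits, bool Signed) {
  uint32_t Lo = static_cast<uint32_t>(Bits);        // low 32 bits (element 0)
  uint32_t Hi = static_cast<uint32_t>(Bits >> 32);  // high 32 bits (element 1)

  // High half: signed or unsigned conversion, mirroring the Signed flag
  // passed to LowerINT_TO_FP64 (the int32_t cast assumes the usual
  // two's-complement behaviour).
  double CvtHi = Signed ? static_cast<double>(static_cast<int32_t>(Hi))
                        : static_cast<double>(Hi);
  // Low half is always converted as unsigned (CvtLo in the patch).
  double CvtLo = static_cast<double>(Lo);

  // AMDGPUISD::LDEXP scales the high half by 2^32 before the final FADD.
  return std::ldexp(CvtHi, 32) + CvtLo;
}

int main() {
  const int64_t Samples[] = {0, 1, -1, 1234567890123LL, INT64_MIN, INT64_MAX};
  for (int64_t S : Samples) {
    double Ref = static_cast<double>(S);                        // direct sitofp
    double Split = Int64ToF64(static_cast<uint64_t>(S), true);  // lowered form
    std::printf("%20lld  ref=%.17g  split=%.17g  %s\n",
                static_cast<long long>(S), Ref, Split,
                Ref == Split ? "match" : "MISMATCH");
  }
  return 0;
}

Both 32-bit halves are exactly representable in f64 and the ldexp scaling by a power of two is exact, so the concluding FADD performs the only rounding step; the split form therefore yields the same result as a direct sitofp/uitofp for every 64-bit input, which is why the lowering is faithful.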