From: Matt Arsenault
Date: Fri, 15 Aug 2014 18:42:18 +0000 (+0000)
Subject: R600/SI: Use source modifiers for f64 fneg
X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=0498d072551c38feb7b9f3f38dad3d20ac7b056a;p=oota-llvm.git

R600/SI: Use source modifiers for f64 fneg

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@215748 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/Target/R600/AMDGPUISelLowering.cpp b/lib/Target/R600/AMDGPUISelLowering.cpp
index 6d7438399fe..7d4aadcc5ca 100644
--- a/lib/Target/R600/AMDGPUISelLowering.cpp
+++ b/lib/Target/R600/AMDGPUISelLowering.cpp
@@ -446,7 +446,7 @@ bool AMDGPUTargetLowering::isFAbsFree(EVT VT) const {
 
 bool AMDGPUTargetLowering::isFNegFree(EVT VT) const {
   assert(VT.isFloatingPoint());
-  return VT == MVT::f32;
+  return VT == MVT::f32 || VT == MVT::f64;
 }
 
 bool AMDGPUTargetLowering::isTruncateFree(EVT Source, EVT Dest) const {
diff --git a/lib/Target/R600/SIISelLowering.cpp b/lib/Target/R600/SIISelLowering.cpp
index 508ed2a9a9a..1d5b43f5954 100644
--- a/lib/Target/R600/SIISelLowering.cpp
+++ b/lib/Target/R600/SIISelLowering.cpp
@@ -223,10 +223,6 @@ SITargetLowering::SITargetLowering(TargetMachine &TM) :
     setOperationAction(ISD::FRINT, MVT::f64, Legal);
   }
 
-  // FIXME: These should be removed and handled the same was as f32 fneg. Source
-  // modifiers also work for the double instructions.
-  setOperationAction(ISD::FNEG, MVT::f64, Expand);
-
   setOperationAction(ISD::FDIV, MVT::f32, Custom);
 
   setTargetDAGCombine(ISD::SELECT_CC);
@@ -701,6 +697,7 @@ MachineBasicBlock * SITargetLowering::EmitInstrWithCustomInserter(
     unsigned DestReg = MI->getOperand(0).getReg();
     unsigned Reg = MRI.createVirtualRegister(&AMDGPU::VReg_32RegClass);
 
+    // FIXME: Should use SALU instructions
     BuildMI(*BB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), Reg)
             .addImm(0x80000000);
     BuildMI(*BB, I, DL, TII->get(AMDGPU::V_XOR_B32_e32), DestReg)
@@ -709,6 +706,33 @@ MachineBasicBlock * SITargetLowering::EmitInstrWithCustomInserter(
     MI->eraseFromParent();
     break;
   }
+  case AMDGPU::FNEG64_SI: {
+    MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
+    const SIInstrInfo *TII = static_cast<const SIInstrInfo *>(
+      getTargetMachine().getSubtargetImpl()->getInstrInfo());
+
+    DebugLoc DL = MI->getDebugLoc();
+    unsigned SrcReg = MI->getOperand(1).getReg();
+    unsigned DestReg = MI->getOperand(0).getReg();
+
+    unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VReg_32RegClass);
+    unsigned ImmReg = MRI.createVirtualRegister(&AMDGPU::VReg_32RegClass);
+
+    // FIXME: Should use SALU instructions
+    BuildMI(*BB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), ImmReg)
+      .addImm(0x80000000);
+    BuildMI(*BB, I, DL, TII->get(AMDGPU::V_XOR_B32_e32), TmpReg)
+      .addReg(SrcReg, 0, AMDGPU::sub1)
+      .addReg(ImmReg);
+
+    BuildMI(*BB, I, DL, TII->get(AMDGPU::REG_SEQUENCE), DestReg)
+      .addReg(SrcReg, 0, AMDGPU::sub0)
+      .addImm(AMDGPU::sub0)
+      .addReg(TmpReg)
+      .addImm(AMDGPU::sub1);
+    MI->eraseFromParent();
+    break;
+  }
   case AMDGPU::FCLAMP_SI: {
     const SIInstrInfo *TII = static_cast<const SIInstrInfo *>(
       getTargetMachine().getSubtargetImpl()->getInstrInfo());
diff --git a/lib/Target/R600/SIInstructions.td b/lib/Target/R600/SIInstructions.td
index af8d3b3d080..8d2c212dc15 100644
--- a/lib/Target/R600/SIInstructions.td
+++ b/lib/Target/R600/SIInstructions.td
@@ -2328,12 +2328,20 @@ def : Pat <
 
 // TODO: Look into not implementing isFNegFree/isFAbsFree for SI, and possibly
 // removing these patterns
-
 def : Pat <
   (fneg (fabs f32:$src)),
   (V_OR_B32_e32 $src, (V_MOV_B32_e32 0x80000000)) /* Set sign bit */
 >;
 
+def : Pat <
+  (fneg (fabs f64:$src)),
+  (f64 (INSERT_SUBREG
+    (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
+      (i32 (EXTRACT_SUBREG f64:$src, sub0)), sub0),
+    (V_OR_B32_e32 (S_MOV_B32 0x80000000),
+      (EXTRACT_SUBREG f64:$src, sub1)), sub1)) // Set sign bit.
+>;
+
 class SIUnaryCustomInsertInst
+define void @fneg_fabs_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %in) {
+  %fabs = call <2 x double> @llvm.fabs.v2f64(<2 x double> %in)
+  %fsub = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %fabs
+  store <2 x double> %fsub, <2 x double> addrspace(1)* %out
+  ret void
+}
+
+; FUNC-LABEL: @fneg_fabs_v4f64
+; SI: V_OR_B32_e32 v{{[0-9]+}}, 0x80000000, v{{[0-9]+}}
+; SI: V_OR_B32_e32 v{{[0-9]+}}, 0x80000000, v{{[0-9]+}}
+; SI: V_OR_B32_e32 v{{[0-9]+}}, 0x80000000, v{{[0-9]+}}
+; SI: V_OR_B32_e32 v{{[0-9]+}}, 0x80000000, v{{[0-9]+}}
+define void @fneg_fabs_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %in) {
+  %fabs = call <4 x double> @llvm.fabs.v4f64(<4 x double> %in)
+  %fsub = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %fabs
+  store <4 x double> %fsub, <4 x double> addrspace(1)* %out
+  ret void
+}
+
+declare double @fabs(double) readnone
+declare double @llvm.fabs.f64(double) readnone
+declare <2 x double> @llvm.fabs.v2f64(<2 x double>) readnone
+declare <4 x double> @llvm.fabs.v4f64(<4 x double>) readnone
diff --git a/test/CodeGen/R600/fneg.f64.ll b/test/CodeGen/R600/fneg.f64.ll
new file mode 100644
index 00000000000..61d95135a4a
--- /dev/null
+++ b/test/CodeGen/R600/fneg.f64.ll
@@ -0,0 +1,59 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+
+; FUNC-LABEL: @fneg_f64
+; SI: V_XOR_B32
+define void @fneg_f64(double addrspace(1)* %out, double %in) {
+  %fneg = fsub double -0.000000e+00, %in
+  store double %fneg, double addrspace(1)* %out
+  ret void
+}
+
+; FUNC-LABEL: @fneg_v2f64
+; SI: V_XOR_B32
+; SI: V_XOR_B32
+define void @fneg_v2f64(<2 x double> addrspace(1)* nocapture %out, <2 x double> %in) {
+  %fneg = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %in
+  store <2 x double> %fneg, <2 x double> addrspace(1)* %out
+  ret void
+}
+
+; FUNC-LABEL: @fneg_v4f64
+; R600: -PV
+; R600: -T
+; R600: -PV
+; R600: -PV
+
+; SI: V_XOR_B32
+; SI: V_XOR_B32
+; SI: V_XOR_B32
+; SI: V_XOR_B32
+define void @fneg_v4f64(<4 x double> addrspace(1)* nocapture %out, <4 x double> %in) {
+  %fneg = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %in
+  store <4 x double> %fneg, <4 x double> addrspace(1)* %out
+  ret void
+}
+
+; DAGCombiner will transform:
+; (fneg (f64 bitcast (i64 a))) => (f64 bitcast (xor (i64 a), 0x80000000))
+; unless the target returns true for isNegFree()
+
+; FUNC-LABEL: @fneg_free_f64
+; FIXME: Unnecessary copy to VGPRs
+; SI: V_ADD_F64 {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, -{{v\[[0-9]+:[0-9]+\]}}, 0, 0
+define void @fneg_free_f64(double addrspace(1)* %out, i64 %in) {
+  %bc = bitcast i64 %in to double
+  %fsub = fsub double 0.0, %bc
+  store double %fsub, double addrspace(1)* %out
+  ret void
+}
+
+; SI-LABEL: @fneg_fold
+; SI: S_LOAD_DWORDX2 [[NEG_VALUE:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0xb
+; SI-NOT: XOR
+; SI: V_MUL_F64 {{v\[[0-9]+:[0-9]+\]}}, -[[NEG_VALUE]], {{v\[[0-9]+:[0-9]+\]}}
+define void @fneg_fold_f64(double addrspace(1)* %out, double %in) {
+  %fsub = fsub double -0.0, %in
+  %fmul = fmul double %fsub, %in
+  store double %fmul, double addrspace(1)* %out
+  ret void
+}
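
The FNEG64_SI expansion and the fneg/fabs f64 patterns above all lean on the same IEEE-754 fact: negating a binary64 value only flips bit 63, which is bit 31 of the high 32-bit half (sub1), and fneg(fabs(x)) only forces that bit to 1. The following is an illustration-only C++ sketch of that high-word manipulation, not part of the patch; the function names are made up, and it assumes IEEE-754 doubles.

// Illustration only: mirror of what V_XOR_B32 (fneg) and V_OR_B32 (fneg of
// fabs) do to the sub1 half of a 64-bit value, assuming IEEE-754 binary64.
#include <cstdint>
#include <cstring>
#include <cstdio>

static double flip_sign_high_word(double x) {
  uint64_t bits;
  std::memcpy(&bits, &x, sizeof bits);            // view the double as raw bits
  uint32_t hi = static_cast<uint32_t>(bits >> 32);
  hi ^= 0x80000000u;                              // fneg: toggle the sign bit (bit 63 overall)
  bits = (static_cast<uint64_t>(hi) << 32) | (bits & 0xffffffffu);
  double r;
  std::memcpy(&r, &bits, sizeof r);
  return r;
}

static double force_sign_high_word(double x) {
  uint64_t bits;
  std::memcpy(&bits, &x, sizeof bits);
  uint32_t hi = static_cast<uint32_t>(bits >> 32);
  hi |= 0x80000000u;                              // fneg(fabs(x)): force the sign bit on
  bits = (static_cast<uint64_t>(hi) << 32) | (bits & 0xffffffffu);
  double r;
  std::memcpy(&r, &bits, sizeof r);
  return r;
}

int main() {
  std::printf("%g %g\n", flip_sign_high_word(1.5), flip_sign_high_word(-2.0));   // -1.5 2
  std::printf("%g %g\n", force_sign_high_word(1.5), force_sign_high_word(-2.0)); // -1.5 -2
  return 0;
}

Because the operation is a pure bit flip on one half of the value, it never touches the low dword (sub0), which is why the custom inserter can reuse the source's sub0 directly in the REG_SEQUENCE.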